diff --git a/core/src/config/Config.cpp b/core/src/config/Config.cpp
index b8f65a1702..eab0d70410 100644
--- a/core/src/config/Config.cpp
+++ b/core/src/config/Config.cpp
@@ -788,22 +788,22 @@ Config::CheckDBConfigPreloadTable(const std::string& value) {
 
     std::unordered_set<std::string> table_set;
 
-    for (auto& table : tables) {
-        if (!ValidationUtil::ValidateTableName(table).ok()) {
-            return Status(SERVER_INVALID_ARGUMENT, "Invalid table name: " + table);
+    for (auto& collection : tables) {
+        if (!ValidationUtil::ValidateCollectionName(collection).ok()) {
+            return Status(SERVER_INVALID_ARGUMENT, "Invalid collection name: " + collection);
         }
 
         bool exist = false;
-        auto status = DBWrapper::DB()->HasNativeTable(table, exist);
+        auto status = DBWrapper::DB()->HasNativeTable(collection, exist);
         if (!(status.ok() && exist)) {
-            return Status(SERVER_TABLE_NOT_EXIST, "Table " + table + " not exist");
+            return Status(SERVER_TABLE_NOT_EXIST, "Collection " + collection + " not exist");
         }
 
-        table_set.insert(table);
+        table_set.insert(collection);
     }
 
     if (table_set.size() != tables.size()) {
         std::string msg = "Invalid preload tables. "
-                          "Possible reason: db_config.preload_table contains duplicate table.";
+                          "Possible reason: db_config.preload_table contains duplicate collection.";
         return Status(SERVER_INVALID_ARGUMENT, msg);
     }
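Reviewer note: the duplicate check above relies on set-versus-list cardinality rather than pairwise comparison. A minimal standalone sketch of the idea (the helper name is ours, not part of the patch):

```cpp
#include <string>
#include <unordered_set>
#include <vector>

// Inserting every name into a set silently drops duplicates, so comparing
// sizes afterwards is enough to detect them -- the same trick the patched
// CheckDBConfigPreloadTable uses for db_config.preload_table.
static bool AllUnique(const std::vector<std::string>& names) {
    std::unordered_set<std::string> seen(names.begin(), names.end());
    return seen.size() == names.size();
}
```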
diff --git a/core/src/db/DB.h b/core/src/db/DB.h
index d9aa9cc964..028e2ea4c0 100644
--- a/core/src/db/DB.h
+++ b/core/src/db/DB.h
@@ -44,82 +44,82 @@ class DB {
     Stop() = 0;
 
     virtual Status
-    CreateTable(meta::TableSchema& table_schema_) = 0;
+    CreateTable(meta::CollectionSchema& table_schema_) = 0;
 
     virtual Status
-    DropTable(const std::string& table_id) = 0;
+    DropTable(const std::string& collection_id) = 0;
 
     virtual Status
-    DescribeTable(meta::TableSchema& table_schema_) = 0;
+    DescribeTable(meta::CollectionSchema& table_schema_) = 0;
 
     virtual Status
-    HasTable(const std::string& table_id, bool& has_or_not_) = 0;
+    HasTable(const std::string& collection_id, bool& has_or_not_) = 0;
 
     virtual Status
-    HasNativeTable(const std::string& table_id, bool& has_or_not_) = 0;
+    HasNativeTable(const std::string& collection_id, bool& has_or_not_) = 0;
 
     virtual Status
-    AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
+    AllTables(std::vector<meta::CollectionSchema>& table_schema_array) = 0;
 
     virtual Status
-    GetTableInfo(const std::string& table_id, TableInfo& table_info) = 0;
+    GetTableInfo(const std::string& collection_id, TableInfo& table_info) = 0;
 
     virtual Status
-    GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;
+    GetTableRowCount(const std::string& collection_id, uint64_t& row_count) = 0;
 
     virtual Status
-    PreloadTable(const std::string& table_id) = 0;
+    PreloadTable(const std::string& collection_id) = 0;
 
     virtual Status
-    UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
+    UpdateTableFlag(const std::string& collection_id, int64_t flag) = 0;
 
     virtual Status
-    CreatePartition(const std::string& table_id, const std::string& partition_name,
+    CreatePartition(const std::string& collection_id, const std::string& partition_name,
                     const std::string& partition_tag) = 0;
 
     virtual Status
     DropPartition(const std::string& partition_name) = 0;
 
     virtual Status
-    DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0;
+    DropPartitionByTag(const std::string& collection_id, const std::string& partition_tag) = 0;
 
     virtual Status
-    ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) = 0;
+    ShowPartitions(const std::string& collection_id, std::vector<meta::CollectionSchema>& partition_schema_array) = 0;
 
     virtual Status
-    InsertVectors(const std::string& table_id, const std::string& partition_tag, VectorsData& vectors) = 0;
+    InsertVectors(const std::string& collection_id, const std::string& partition_tag, VectorsData& vectors) = 0;
 
     virtual Status
-    DeleteVector(const std::string& table_id, IDNumber vector_id) = 0;
+    DeleteVector(const std::string& collection_id, IDNumber vector_id) = 0;
 
     virtual Status
-    DeleteVectors(const std::string& table_id, IDNumbers vector_ids) = 0;
+    DeleteVectors(const std::string& collection_id, IDNumbers vector_ids) = 0;
 
     virtual Status
-    Flush(const std::string& table_id) = 0;
+    Flush(const std::string& collection_id) = 0;
 
     virtual Status
     Flush() = 0;
 
     virtual Status
-    Compact(const std::string& table_id) = 0;
+    Compact(const std::string& collection_id) = 0;
 
     virtual Status
-    GetVectorByID(const std::string& table_id, const IDNumber& vector_id, VectorsData& vector) = 0;
+    GetVectorByID(const std::string& collection_id, const IDNumber& vector_id, VectorsData& vector) = 0;
 
     virtual Status
-    GetVectorIDs(const std::string& table_id, const std::string& segment_id, IDNumbers& vector_ids) = 0;
+    GetVectorIDs(const std::string& collection_id, const std::string& segment_id, IDNumbers& vector_ids) = 0;
 
     //    virtual Status
     //    Merge(const std::set<std::string>& table_ids) = 0;
 
     virtual Status
-    QueryByID(const std::shared_ptr<server::Context>& context, const std::string& table_id,
+    QueryByID(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
               const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
               IDNumber vector_id, ResultIds& result_ids, ResultDistances& result_distances) = 0;
 
     virtual Status
-    Query(const std::shared_ptr<server::Context>& context, const std::string& table_id,
+    Query(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
           const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
           const VectorsData& vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0;
 
@@ -132,13 +132,13 @@ class DB {
     Size(uint64_t& result) = 0;
 
     virtual Status
-    CreateIndex(const std::string& table_id, const TableIndex& index) = 0;
+    CreateIndex(const std::string& collection_id, const TableIndex& index) = 0;
 
     virtual Status
-    DescribeIndex(const std::string& table_id, TableIndex& index) = 0;
+    DescribeIndex(const std::string& collection_id, TableIndex& index) = 0;
 
     virtual Status
-    DropIndex(const std::string& table_id) = 0;
+    DropIndex(const std::string& collection_id) = 0;
 
     virtual Status
     DropAll() = 0;
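Reviewer note: the DB.h hunk is a pure parameter/type rename, so call sites keep the same shape. A hedged usage sketch (assumes the `DBPtr` alias for `std::shared_ptr<DB>`; `WarmUpCollection` is illustrative, and error handling is abbreviated):

```cpp
// Check that a native (non-partition) collection exists, then pull its
// files into cache -- exercising the renamed collection_id parameters.
Status WarmUpCollection(const engine::DBPtr& db, const std::string& collection_id) {
    bool exists = false;
    auto status = db->HasNativeTable(collection_id, exists);
    if (!status.ok() || !exists) {
        return Status(DB_NOT_FOUND, "Collection " + collection_id + " not exist");
    }
    return db->PreloadTable(collection_id);
}
```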
diff --git a/core/src/db/DBImpl.cpp b/core/src/db/DBImpl.cpp
index fca45d369b..3912215c77 100644
--- a/core/src/db/DBImpl.cpp
+++ b/core/src/db/DBImpl.cpp
@@ -178,35 +178,35 @@ DBImpl::DropAll() {
 }
 
 Status
-DBImpl::CreateTable(meta::TableSchema& table_schema) {
+DBImpl::CreateTable(meta::CollectionSchema& table_schema) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    meta::TableSchema temp_schema = table_schema;
+    meta::CollectionSchema temp_schema = table_schema;
     temp_schema.index_file_size_ *= ONE_MB;  // store as MB
     if (options_.wal_enable_) {
-        temp_schema.flush_lsn_ = wal_mgr_->CreateTable(table_schema.table_id_);
+        temp_schema.flush_lsn_ = wal_mgr_->CreateTable(table_schema.collection_id_);
     }
 
     return meta_ptr_->CreateTable(temp_schema);
 }
 
 Status
-DBImpl::DropTable(const std::string& table_id) {
+DBImpl::DropTable(const std::string& collection_id) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     if (options_.wal_enable_) {
-        wal_mgr_->DropTable(table_id);
+        wal_mgr_->DropTable(collection_id);
     }
 
-    return DropTableRecursively(table_id);
+    return DropTableRecursively(collection_id);
 }
 
 Status
-DBImpl::DescribeTable(meta::TableSchema& table_schema) {
+DBImpl::DescribeTable(meta::CollectionSchema& table_schema) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
@@ -217,22 +217,22 @@ DBImpl::DescribeTable(meta::TableSchema& table_schema) {
 }
 
 Status
-DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
+DBImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    return meta_ptr_->HasTable(table_id, has_or_not);
+    return meta_ptr_->HasTable(collection_id, has_or_not);
 }
 
 Status
-DBImpl::HasNativeTable(const std::string& table_id, bool& has_or_not_) {
+DBImpl::HasNativeTable(const std::string& collection_id, bool& has_or_not_) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
    }
 
-    engine::meta::TableSchema table_schema;
-    table_schema.table_id_ = table_id;
+    engine::meta::CollectionSchema table_schema;
+    table_schema.collection_id_ = collection_id;
     auto status = DescribeTable(table_schema);
     if (!status.ok()) {
         has_or_not_ = false;
@@ -249,12 +249,12 @@ DBImpl::HasNativeTable(const std::string& table_id, bool& has_or_not_) {
 }
 
 Status
-DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
+DBImpl::AllTables(std::vector<meta::CollectionSchema>& table_schema_array) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    std::vector<meta::TableSchema> all_tables;
+    std::vector<meta::CollectionSchema> all_tables;
     auto status = meta_ptr_->AllTables(all_tables);
 
     // only return real tables, dont return partition tables
@@ -269,22 +269,22 @@ DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
 }
 
 Status
-DBImpl::GetTableInfo(const std::string& table_id, TableInfo& table_info) {
+DBImpl::GetTableInfo(const std::string& collection_id, TableInfo& table_info) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     // step1: get all partition ids
-    std::vector<std::pair<std::string, std::string>> name2tag = {{table_id, milvus::engine::DEFAULT_PARTITON_TAG}};
-    std::vector<meta::TableSchema> partition_array;
-    auto status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<std::pair<std::string, std::string>> name2tag = {{collection_id, milvus::engine::DEFAULT_PARTITON_TAG}};
+    std::vector<meta::CollectionSchema> partition_array;
+    auto status = meta_ptr_->ShowPartitions(collection_id, partition_array);
     for (auto& schema : partition_array) {
-        name2tag.push_back(std::make_pair(schema.table_id_, schema.partition_tag_));
+        name2tag.push_back(std::make_pair(schema.collection_id_, schema.partition_tag_));
     }
 
-    // step2: get native table info
-    std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX,
-                                meta::TableFileSchema::FILE_TYPE::INDEX};
+    // step2: get native collection info
+    std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX,
+                                meta::SegmentSchema::FILE_TYPE::INDEX};
 
     static std::map<int32_t, std::string> index_type_name = {
         {(int32_t)engine::EngineType::FAISS_IDMAP, "IDMAP"},
@@ -301,10 +301,10 @@ DBImpl::GetTableInfo(const std::string& table_id, TableInfo& table_info) {
     };
 
     for (auto& name_tag : name2tag) {
-        meta::TableFilesSchema table_files;
+        meta::SegmentsSchema table_files;
         status = meta_ptr_->FilesByType(name_tag.first, file_types, table_files);
         if (!status.ok()) {
-            std::string err_msg = "Failed to get table info: " + status.ToString();
+            std::string err_msg = "Failed to get collection info: " + status.ToString();
            ENGINE_LOG_ERROR << err_msg;
             return Status(DB_ERROR, err_msg);
         }
@@ -320,7 +320,7 @@ DBImpl::GetTableInfo(const std::string& table_id, TableInfo& table_info) {
         }
 
         PartitionStat partition_stat;
-        if (name_tag.first == table_id) {
+        if (name_tag.first == collection_id) {
             partition_stat.tag_ = milvus::engine::DEFAULT_PARTITON_TAG;
         } else {
             partition_stat.tag_ = name_tag.second;
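Reviewer note: GetTableInfo walks the parent collection plus each partition as (name, tag) pairs. A small standalone sketch of that pairing (the default tag's literal value is an assumption here; the source uses the DEFAULT_PARTITON_TAG constant):

```cpp
#include <string>
#include <utility>
#include <vector>

using NameTag = std::pair<std::string, std::string>;

// Parent collection first, under the default tag; then one entry per
// partition under its own tag -- mirroring the name2tag vector above.
std::vector<NameTag> BuildNameTagList(const std::string& collection_id,
                                      const std::vector<NameTag>& partitions) {
    std::vector<NameTag> name2tag = {{collection_id, "_default"}};  // assumed default tag
    name2tag.insert(name2tag.end(), partitions.begin(), partitions.end());
    return name2tag;
}
```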
@@ -334,23 +334,23 @@ DBImpl::GetTableInfo(const std::string& table_id, TableInfo& table_info) {
 }
 
 Status
-DBImpl::PreloadTable(const std::string& table_id) {
+DBImpl::PreloadTable(const std::string& collection_id) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    // step 1: get all table files from parent table
-    meta::TableFilesSchema files_array;
-    auto status = GetFilesToSearch(table_id, files_array);
+    // step 1: get all collection files from parent collection
+    meta::SegmentsSchema files_array;
+    auto status = GetFilesToSearch(collection_id, files_array);
     if (!status.ok()) {
         return status;
     }
 
     // step 2: get files from partition tables
-    std::vector<meta::TableSchema> partition_array;
-    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<meta::CollectionSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(collection_id, partition_array);
     for (auto& schema : partition_array) {
-        status = GetFilesToSearch(schema.table_id_, files_array);
+        status = GetFilesToSearch(schema.collection_id_, files_array);
     }
 
     int64_t size = 0;
@@ -359,14 +359,14 @@ DBImpl::PreloadTable(const std::string& table_id) {
     int64_t available_size = cache_total - cache_usage;
 
     // step 3: load file one by one
-    ENGINE_LOG_DEBUG << "Begin pre-load table:" + table_id + ", totally " << files_array.size()
+    ENGINE_LOG_DEBUG << "Begin pre-load collection:" + collection_id + ", totally " << files_array.size()
                      << " files need to be pre-loaded";
-    TimeRecorderAuto rc("Pre-load table:" + table_id);
+    TimeRecorderAuto rc("Pre-load collection:" + collection_id);
     for (auto& file : files_array) {
         EngineType engine_type;
-        if (file.file_type_ == meta::TableFileSchema::FILE_TYPE::RAW ||
-            file.file_type_ == meta::TableFileSchema::FILE_TYPE::TO_INDEX ||
-            file.file_type_ == meta::TableFileSchema::FILE_TYPE::BACKUP) {
+        if (file.file_type_ == meta::SegmentSchema::FILE_TYPE::RAW ||
+            file.file_type_ == meta::SegmentSchema::FILE_TYPE::TO_INDEX ||
+            file.file_type_ == meta::SegmentSchema::FILE_TYPE::BACKUP) {
             engine_type = utils::IsBinaryMetricType(file.metric_type_) ? EngineType::FAISS_BIN_IDMAP
                                                                        : EngineType::FAISS_IDMAP;
         } else {
@@ -396,7 +396,7 @@ DBImpl::PreloadTable(const std::string& table_id) {
             return Status(SERVER_CACHE_FULL, "Cache is full");
         }
     } catch (std::exception& ex) {
-        std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
+        std::string msg = "Pre-load collection encounter exception: " + std::string(ex.what());
         ENGINE_LOG_ERROR << msg;
         return Status(DB_ERROR, msg);
     }
@@ -406,33 +406,33 @@ DBImpl::PreloadTable(const std::string& table_id) {
 }
 
 Status
-DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
+DBImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    return meta_ptr_->UpdateTableFlag(table_id, flag);
+    return meta_ptr_->UpdateTableFlag(collection_id, flag);
 }
 
 Status
-DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
+DBImpl::GetTableRowCount(const std::string& collection_id, uint64_t& row_count) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    return GetTableRowCountRecursively(table_id, row_count);
+    return GetTableRowCountRecursively(collection_id, row_count);
 }
 
 Status
-DBImpl::CreatePartition(const std::string& table_id, const std::string& partition_name,
+DBImpl::CreatePartition(const std::string& collection_id, const std::string& partition_name,
                         const std::string& partition_tag) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     uint64_t lsn = 0;
-    meta_ptr_->GetTableFlushLSN(table_id, lsn);
-    return meta_ptr_->CreatePartition(table_id, partition_name, partition_tag, lsn);
+    meta_ptr_->GetTableFlushLSN(collection_id, lsn);
+    return meta_ptr_->CreatePartition(collection_id, partition_name, partition_tag, lsn);
 }
 
 Status
@@ -442,13 +442,13 @@ DBImpl::DropPartition(const std::string& partition_name) {
     }
 
     mem_mgr_->EraseMemVector(partition_name);                // not allow insert
-    auto status = meta_ptr_->DropPartition(partition_name);  // soft delete table
+    auto status = meta_ptr_->DropPartition(partition_name);  // soft delete collection
     if (!status.ok()) {
         ENGINE_LOG_ERROR << status.message();
         return status;
     }
 
-    // scheduler will determine when to delete table files
+    // scheduler will determine when to delete collection files
     auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
     scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(partition_name, meta_ptr_, nres);
     scheduler::JobMgrInst::GetInstance()->Put(job);
@@ -458,13 +458,13 @@ DBImpl::DropPartition(const std::string& partition_name) {
 }
 
 Status
-DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) {
+DBImpl::DropPartitionByTag(const std::string& collection_id, const std::string& partition_tag) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     std::string partition_name;
-    auto status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name);
+    auto status = meta_ptr_->GetPartitionName(collection_id, partition_tag, partition_name);
     if (!status.ok()) {
         ENGINE_LOG_ERROR << status.message();
         return status;
@@ -474,22 +474,22 @@ DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& parti
 }
 
 Status
-DBImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) {
+DBImpl::ShowPartitions(const std::string& collection_id, std::vector<meta::CollectionSchema>& partition_schema_array) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
    }
 
-    return meta_ptr_->ShowPartitions(table_id, partition_schema_array);
+    return meta_ptr_->ShowPartitions(collection_id, partition_schema_array);
 }
 
 Status
-DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_tag, VectorsData& vectors) {
+DBImpl::InsertVectors(const std::string& collection_id, const std::string& partition_tag, VectorsData& vectors) {
     //    ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    // insert vectors into target table
+    // insert vectors into target collection
     // (zhiru): generate ids
     if (vectors.id_array_.empty()) {
         SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance();
@@ -502,22 +502,22 @@ DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_
     Status status;
     if (options_.wal_enable_) {
         std::string target_table_name;
-        status = GetPartitionByTag(table_id, partition_tag, target_table_name);
+        status = GetPartitionByTag(collection_id, partition_tag, target_table_name);
         if (!status.ok()) {
             return status;
         }
 
         if (!vectors.float_data_.empty()) {
-            wal_mgr_->Insert(table_id, partition_tag, vectors.id_array_, vectors.float_data_);
+            wal_mgr_->Insert(collection_id, partition_tag, vectors.id_array_, vectors.float_data_);
         } else if (!vectors.binary_data_.empty()) {
-            wal_mgr_->Insert(table_id, partition_tag, vectors.id_array_, vectors.binary_data_);
+            wal_mgr_->Insert(collection_id, partition_tag, vectors.id_array_, vectors.binary_data_);
         }
         bg_task_swn_.Notify();
     } else {
         wal::MXLogRecord record;
         record.lsn = 0;  // need to get from meta ?
-        record.table_id = table_id;
+        record.collection_id = collection_id;
         record.partition_tag = partition_tag;
         record.ids = vectors.id_array_.data();
         record.length = vectors.vector_count_;
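Reviewer note: when the WAL is disabled, InsertVectors fills an MXLogRecord inline, as shown above. A hedged sketch of that record shape as a helper (the helper itself is ours, not part of the patch; only fields visible in the diff are set):

```cpp
// Illustrative only: populate a WAL record the way the non-WAL insert
// branch above does, using the renamed collection_id field.
wal::MXLogRecord MakeInsertRecord(const std::string& collection_id,
                                  const std::string& partition_tag, VectorsData& vectors) {
    wal::MXLogRecord record;
    record.lsn = 0;  // the code notes this may need to come from meta
    record.collection_id = collection_id;
    record.partition_tag = partition_tag;
    record.ids = vectors.id_array_.data();
    record.length = vectors.vector_count_;
    return record;
}
```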
@@ -540,28 +540,28 @@ DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_
 }
 
 Status
-DBImpl::DeleteVector(const std::string& table_id, IDNumber vector_id) {
+DBImpl::DeleteVector(const std::string& collection_id, IDNumber vector_id) {
     IDNumbers ids;
     ids.push_back(vector_id);
-    return DeleteVectors(table_id, ids);
+    return DeleteVectors(collection_id, ids);
 }
 
 Status
-DBImpl::DeleteVectors(const std::string& table_id, IDNumbers vector_ids) {
+DBImpl::DeleteVectors(const std::string& collection_id, IDNumbers vector_ids) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     Status status;
     if (options_.wal_enable_) {
-        wal_mgr_->DeleteById(table_id, vector_ids);
+        wal_mgr_->DeleteById(collection_id, vector_ids);
         bg_task_swn_.Notify();
     } else {
         wal::MXLogRecord record;
         record.lsn = 0;  // need to get from meta ?
         record.type = wal::MXLogType::Delete;
-        record.table_id = table_id;
+        record.collection_id = collection_id;
         record.ids = vector_ids.data();
         record.length = vector_ids.size();
 
@@ -572,27 +572,27 @@ DBImpl::DeleteVectors(const std::string& table_id, IDNumbers vector_ids) {
 }
 
 Status
-DBImpl::Flush(const std::string& table_id) {
+DBImpl::Flush(const std::string& collection_id) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     Status status;
     bool has_table;
-    status = HasTable(table_id, has_table);
+    status = HasTable(collection_id, has_table);
     if (!status.ok()) {
         return status;
     }
     if (!has_table) {
-        ENGINE_LOG_ERROR << "Table to flush does not exist: " << table_id;
-        return Status(DB_NOT_FOUND, "Table to flush does not exist");
+        ENGINE_LOG_ERROR << "Collection to flush does not exist: " << collection_id;
+        return Status(DB_NOT_FOUND, "Collection to flush does not exist");
     }
 
-    ENGINE_LOG_DEBUG << "Begin flush table: " << table_id;
+    ENGINE_LOG_DEBUG << "Begin flush collection: " << collection_id;
 
     if (options_.wal_enable_) {
         ENGINE_LOG_DEBUG << "WAL flush";
-        auto lsn = wal_mgr_->Flush(table_id);
+        auto lsn = wal_mgr_->Flush(collection_id);
         ENGINE_LOG_DEBUG << "wal_mgr_->Flush";
         if (lsn != 0) {
             bg_task_swn_.Notify();
@@ -604,11 +604,11 @@ DBImpl::Flush(const std::string& table_id) {
         ENGINE_LOG_DEBUG << "MemTable flush";
         wal::MXLogRecord record;
         record.type = wal::MXLogType::Flush;
-        record.table_id = table_id;
+        record.collection_id = collection_id;
         status = ExecWalRecord(record);
     }
 
-    ENGINE_LOG_DEBUG << "End flush table: " << table_id;
+    ENGINE_LOG_DEBUG << "End flush collection: " << collection_id;
 
     return status;
 }
@@ -642,25 +642,25 @@ DBImpl::Flush() {
 }
 
 Status
-DBImpl::Compact(const std::string& table_id) {
+DBImpl::Compact(const std::string& collection_id) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    engine::meta::TableSchema table_schema;
-    table_schema.table_id_ = table_id;
+    engine::meta::CollectionSchema table_schema;
+    table_schema.collection_id_ = collection_id;
     auto status = DescribeTable(table_schema);
     if (!status.ok()) {
         if (status.code() == DB_NOT_FOUND) {
-            ENGINE_LOG_ERROR << "Table to compact does not exist: " << table_id;
-            return Status(DB_NOT_FOUND, "Table to compact does not exist");
+            ENGINE_LOG_ERROR << "Collection to compact does not exist: " << collection_id;
+            return Status(DB_NOT_FOUND, "Collection to compact does not exist");
         } else {
             return status;
         }
     } else {
         if (!table_schema.owner_table_.empty()) {
-            ENGINE_LOG_ERROR << "Table to compact does not exist: " << table_id;
-            return Status(DB_NOT_FOUND, "Table to compact does not exist");
+            ENGINE_LOG_ERROR << "Collection to compact does not exist: " << collection_id;
+            return Status(DB_NOT_FOUND, "Collection to compact does not exist");
         }
     }
 
@@ -671,13 +671,13 @@ DBImpl::Compact(const std::string& table_id) {
     const std::lock_guard<std::mutex> index_lock(build_index_mutex_);
     const std::lock_guard<std::mutex> merge_lock(flush_merge_compact_mutex_);
 
-    ENGINE_LOG_DEBUG << "Compacting table: " << table_id;
+    ENGINE_LOG_DEBUG << "Compacting collection: " << collection_id;
 
     // Get files to compact from meta.
-    std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX,
-                                meta::TableFileSchema::FILE_TYPE::BACKUP};
-    meta::TableFilesSchema files_to_compact;
-    status = meta_ptr_->FilesByType(table_id, file_types, files_to_compact);
+    std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX,
+                                meta::SegmentSchema::FILE_TYPE::BACKUP};
+    meta::SegmentsSchema files_to_compact;
+    status = meta_ptr_->FilesByType(collection_id, file_types, files_to_compact);
     if (!status.ok()) {
         std::string err_msg = "Failed to get files to compact: " + status.message();
         ENGINE_LOG_ERROR << err_msg;
@@ -690,7 +690,7 @@ DBImpl::Compact(const std::string& table_id) {
 
     Status compact_status;
     for (auto iter = files_to_compact.begin(); iter != files_to_compact.end();) {
-        meta::TableFileSchema file = *iter;
+        meta::SegmentSchema file = *iter;
         iter = files_to_compact.erase(iter);
 
         // Check if the segment needs compacting
@@ -705,9 +705,9 @@ DBImpl::Compact(const std::string& table_id) {
             continue;  // skip this file and try compact next one
         }
 
-        meta::TableFilesSchema files_to_update;
+        meta::SegmentsSchema files_to_update;
         if (deleted_docs_size != 0) {
-            compact_status = CompactFile(table_id, file, files_to_update);
+            compact_status = CompactFile(collection_id, file, files_to_update);
 
             if (!compact_status.ok()) {
                 ENGINE_LOG_ERROR << "Compact failed for segment " << file.segment_id_ << ": "
@@ -733,30 +733,30 @@ DBImpl::Compact(const std::string& table_id) {
     OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_compact);
 
     if (compact_status.ok()) {
-        ENGINE_LOG_DEBUG << "Finished compacting table: " << table_id;
+        ENGINE_LOG_DEBUG << "Finished compacting collection: " << collection_id;
     }
 
     return compact_status;
 }
 
 Status
-DBImpl::CompactFile(const std::string& table_id, const meta::TableFileSchema& file,
-                    meta::TableFilesSchema& files_to_update) {
-    ENGINE_LOG_DEBUG << "Compacting segment " << file.segment_id_ << " for table: " << table_id;
+DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema& file,
+                    meta::SegmentsSchema& files_to_update) {
+    ENGINE_LOG_DEBUG << "Compacting segment " << file.segment_id_ << " for collection: " << collection_id;
 
-    // Create new table file
-    meta::TableFileSchema compacted_file;
-    compacted_file.table_id_ = table_id;
+    // Create new collection file
+    meta::SegmentSchema compacted_file;
+    compacted_file.collection_id_ = collection_id;
     //    compacted_file.date_ = date;
-    compacted_file.file_type_ = meta::TableFileSchema::NEW_MERGE;  // TODO: use NEW_MERGE for now
+    compacted_file.file_type_ = meta::SegmentSchema::NEW_MERGE;  // TODO: use NEW_MERGE for now
     Status status = meta_ptr_->CreateTableFile(compacted_file);
 
     if (!status.ok()) {
-        ENGINE_LOG_ERROR << "Failed to create table file: " << status.message();
+        ENGINE_LOG_ERROR << "Failed to create collection file: " << status.message();
         return status;
     }
 
-    // Compact (merge) file to the newly created table file
+    // Compact (merge) file to the newly created collection file
     std::string new_segment_dir;
     utils::GetParentPath(compacted_file.location_, new_segment_dir);
 
@@ -773,7 +773,7 @@ DBImpl::CompactFile(const std::string& table_id, const meta::TableFileSchema& fi
     status = segment_writer_ptr->Serialize();
     if (!status.ok()) {
         ENGINE_LOG_ERROR << "Failed to serialize compacted segment: " << status.message();
-        compacted_file.file_type_ = meta::TableFileSchema::TO_DELETE;
+        compacted_file.file_type_ = meta::SegmentSchema::TO_DELETE;
        auto mark_status = meta_ptr_->UpdateTableFile(compacted_file);
         if (mark_status.ok()) {
             ENGINE_LOG_DEBUG << "Mark file: " << compacted_file.file_id_ << " to to_delete";
@@ -781,35 +781,35 @@ DBImpl::CompactFile(const std::string& table_id, const meta::TableFileSchema& fi
         return status;
     }
 
-    // Update table files state
+    // Update collection files state
     // if index type isn't IDMAP, set file type to TO_INDEX if file size exceed index_file_size
     // else set file type to RAW, no need to build index
     if (!utils::IsRawIndexType(compacted_file.engine_type_)) {
         compacted_file.file_type_ = (segment_writer_ptr->Size() >= compacted_file.index_file_size_)
-                                        ? meta::TableFileSchema::TO_INDEX
-                                        : meta::TableFileSchema::RAW;
+                                        ? meta::SegmentSchema::TO_INDEX
+                                        : meta::SegmentSchema::RAW;
     } else {
-        compacted_file.file_type_ = meta::TableFileSchema::RAW;
+        compacted_file.file_type_ = meta::SegmentSchema::RAW;
     }
     compacted_file.file_size_ = segment_writer_ptr->Size();
     compacted_file.row_count_ = segment_writer_ptr->VectorCount();
 
     if (compacted_file.row_count_ == 0) {
         ENGINE_LOG_DEBUG << "Compacted segment is empty. Mark it as TO_DELETE";
-        compacted_file.file_type_ = meta::TableFileSchema::TO_DELETE;
+        compacted_file.file_type_ = meta::SegmentSchema::TO_DELETE;
     }
 
     files_to_update.emplace_back(compacted_file);
 
     // Set all files in segment to TO_DELETE
     auto& segment_id = file.segment_id_;
-    meta::TableFilesSchema segment_files;
+    meta::SegmentsSchema segment_files;
     status = meta_ptr_->GetTableFilesBySegmentId(segment_id, segment_files);
     if (!status.ok()) {
         return status;
     }
     for (auto& f : segment_files) {
-        f.file_type_ = meta::TableFileSchema::FILE_TYPE::TO_DELETE;
+        f.file_type_ = meta::SegmentSchema::FILE_TYPE::TO_DELETE;
         files_to_update.emplace_back(f);
     }
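Reviewer note: the post-write bookkeeping in CompactFile (and again in MergeFiles further down) reduces to one decision. A standalone sketch of that predicate (names are illustrative, not from the patch):

```cpp
#include <cstdint>

enum class FileType { RAW, TO_INDEX };

// After writing a segment, pick its next file type: raw-index engines stay
// RAW; otherwise a segment that reached the collection's index_file_size
// threshold is queued for index building (TO_INDEX).
FileType NextFileType(bool is_raw_index_engine, int64_t segment_size, int64_t index_file_size) {
    if (is_raw_index_engine) {
        return FileType::RAW;
    }
    return segment_size >= index_file_size ? FileType::TO_INDEX : FileType::RAW;
}
```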
@@ -825,38 +825,38 @@ DBImpl::CompactFile(const std::string& table_id, const meta::TableFileSchema& fi
 }
 
 Status
-DBImpl::GetVectorByID(const std::string& table_id, const IDNumber& vector_id, VectorsData& vector) {
+DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_id, VectorsData& vector) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
     bool has_table;
-    auto status = HasTable(table_id, has_table);
+    auto status = HasTable(collection_id, has_table);
     if (!has_table) {
-        ENGINE_LOG_ERROR << "Table " << table_id << " does not exist: ";
-        return Status(DB_NOT_FOUND, "Table does not exist");
+        ENGINE_LOG_ERROR << "Collection " << collection_id << " does not exist: ";
+        return Status(DB_NOT_FOUND, "Collection does not exist");
     }
     if (!status.ok()) {
         return status;
     }
 
-    meta::TableFilesSchema files_to_query;
+    meta::SegmentsSchema files_to_query;
 
-    std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX,
-                                meta::TableFileSchema::FILE_TYPE::BACKUP};
-    meta::TableFilesSchema table_files;
-    status = meta_ptr_->FilesByType(table_id, file_types, files_to_query);
+    std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX,
+                                meta::SegmentSchema::FILE_TYPE::BACKUP};
+    meta::SegmentsSchema table_files;
+    status = meta_ptr_->FilesByType(collection_id, file_types, files_to_query);
     if (!status.ok()) {
         std::string err_msg = "Failed to get files for GetVectorByID: " + status.message();
         ENGINE_LOG_ERROR << err_msg;
         return status;
     }
 
-    std::vector<meta::TableSchema> partition_array;
-    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<meta::CollectionSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(collection_id, partition_array);
     for (auto& schema : partition_array) {
-        meta::TableFilesSchema files;
-        status = meta_ptr_->FilesByType(schema.table_id_, file_types, files);
+        meta::SegmentsSchema files;
+        status = meta_ptr_->FilesByType(schema.collection_id_, file_types, files);
         if (!status.ok()) {
             std::string err_msg = "Failed to get files for GetVectorByID: " + status.message();
             ENGINE_LOG_ERROR << err_msg;
@@ -874,7 +874,7 @@ DBImpl::GetVectorByID(const std::string& table_id, const IDNumber& vector_id, Ve
     cache::CpuCacheMgr::GetInstance()->PrintInfo();
     OngoingFileChecker::GetInstance().MarkOngoingFiles(files_to_query);
 
-    status = GetVectorByIdHelper(table_id, vector_id, vector, files_to_query);
+    status = GetVectorByIdHelper(collection_id, vector_id, vector, files_to_query);
 
     OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_query);
     cache::CpuCacheMgr::GetInstance()->PrintInfo();
@@ -883,24 +883,24 @@ DBImpl::GetVectorByID(const std::string& table_id, const IDNumber& vector_id, Ve
 }
 
 Status
-DBImpl::GetVectorIDs(const std::string& table_id, const std::string& segment_id, IDNumbers& vector_ids) {
+DBImpl::GetVectorIDs(const std::string& collection_id, const std::string& segment_id, IDNumbers& vector_ids) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    // step 1: check table existence
+    // step 1: check collection existence
     bool has_table;
-    auto status = HasTable(table_id, has_table);
+    auto status = HasTable(collection_id, has_table);
     if (!has_table) {
-        ENGINE_LOG_ERROR << "Table " << table_id << " does not exist: ";
-        return Status(DB_NOT_FOUND, "Table does not exist");
+        ENGINE_LOG_ERROR << "Collection " << collection_id << " does not exist: ";
+        return Status(DB_NOT_FOUND, "Collection does not exist");
     }
     if (!status.ok()) {
         return status;
     }
 
     // step 2: find segment
-    meta::TableFilesSchema table_files;
+    meta::SegmentsSchema table_files;
     status = meta_ptr_->GetTableFilesBySegmentId(segment_id, table_files);
     if (!status.ok()) {
         return status;
@@ -910,14 +910,14 @@ DBImpl::GetVectorIDs(const std::string& table_id, const std::string& segment_id,
         return Status(DB_NOT_FOUND, "Segment does not exist");
     }
 
-    // check the segment is belong to this table
-    if (table_files[0].table_id_ != table_id) {
-        // the segment could be in a partition under this table
-        meta::TableSchema table_schema;
-        table_schema.table_id_ = table_files[0].table_id_;
+    // check the segment is belong to this collection
+    if (table_files[0].collection_id_ != collection_id) {
+        // the segment could be in a partition under this collection
+        meta::CollectionSchema table_schema;
+        table_schema.collection_id_ = table_files[0].collection_id_;
         status = DescribeTable(table_schema);
-        if (table_schema.owner_table_ != table_id) {
-            return Status(DB_NOT_FOUND, "Segment does not belong to this table");
+        if (table_schema.owner_table_ != collection_id) {
+            return Status(DB_NOT_FOUND, "Segment does not belong to this collection");
         }
     }
 
@@ -954,8 +954,8 @@ DBImpl::GetVectorIDs(const std::string& table_id, const std::string& segment_id,
 }
 
 Status
-DBImpl::GetVectorByIdHelper(const std::string& table_id, IDNumber vector_id, VectorsData& vector,
-                            const meta::TableFilesSchema& files) {
+DBImpl::GetVectorByIdHelper(const std::string& collection_id, IDNumber vector_id, VectorsData& vector,
+                            const meta::SegmentsSchema& files) {
     ENGINE_LOG_DEBUG << "Getting vector by id in " << files.size() << " files";
 
     for (auto& file : files) {
@@ -1020,7 +1020,7 @@ DBImpl::GetVectorByIdHelper(const std::string& table_id, IDNumber vector_id, Vec
 }
 
 Status
-DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
+DBImpl::CreateIndex(const std::string& collection_id, const TableIndex& index) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
@@ -1035,9 +1035,9 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
 
     // step 1: check index difference
     TableIndex old_index;
-    status = DescribeIndex(table_id, old_index);
+    status = DescribeIndex(collection_id, old_index);
     if (!status.ok()) {
-        ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
+        ENGINE_LOG_ERROR << "Failed to get collection index info for collection: " << collection_id;
         return status;
     }
 
@@ -1045,7 +1045,7 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
     TableIndex new_index = index;
     new_index.metric_type_ = old_index.metric_type_;  // dont change metric type, it was defined by CreateTable
     if (!utils::IsSameIndex(old_index, new_index)) {
-        status = UpdateTableIndexRecursively(table_id, new_index);
+        status = UpdateTableIndexRecursively(collection_id, new_index);
         if (!status.ok()) {
             return status;
         }
@@ -1057,33 +1057,33 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
     WaitMergeFileFinish();
 
     // step 4: wait and build index
-    status = index_failed_checker_.CleanFailedIndexFileOfTable(table_id);
-    status = WaitTableIndexRecursively(table_id, index);
+    status = index_failed_checker_.CleanFailedIndexFileOfTable(collection_id);
+    status = WaitTableIndexRecursively(collection_id, index);
 
     return status;
 }
 
 Status
-DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
+DBImpl::DescribeIndex(const std::string& collection_id, TableIndex& index) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    return meta_ptr_->DescribeTableIndex(table_id, index);
+    return meta_ptr_->DescribeTableIndex(collection_id, index);
 }
 
 Status
-DBImpl::DropIndex(const std::string& table_id) {
+DBImpl::DropIndex(const std::string& collection_id) {
     if (!initialized_.load(std::memory_order_acquire)) {
         return SHUTDOWN_ERROR;
     }
 
-    ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
-    return DropTableIndexRecursively(table_id);
+    ENGINE_LOG_DEBUG << "Drop index for collection: " << collection_id;
+    return DropTableIndexRecursively(collection_id);
 }
 
 Status
-DBImpl::QueryByID(const std::shared_ptr<server::Context>& context, const std::string& table_id,
+DBImpl::QueryByID(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
                   const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
                   IDNumber vector_id, ResultIds& result_ids, ResultDistances& result_distances) {
     if (!initialized_.load(std::memory_order_acquire)) {
@@ -1094,12 +1094,12 @@ DBImpl::QueryByID(const std::shared_ptr<server::Context>& context, const std::st
     vectors_data.id_array_.emplace_back(vector_id);
     vectors_data.vector_count_ = 1;
     Status result =
-        Query(context, table_id, partition_tags, k, extra_params, vectors_data, result_ids, result_distances);
+        Query(context, collection_id, partition_tags, k, extra_params, vectors_data, result_ids, result_distances);
     return result;
 }
 
 Status
-DBImpl::Query(const std::shared_ptr<server::Context>& context, const std::string& table_id,
+DBImpl::Query(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
               const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
               const VectorsData& vectors, ResultIds& result_ids, ResultDistances& result_distances) {
     milvus::server::ContextChild tracer(context, "Query");
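Reviewer note: a hedged sketch of driving the renamed index API from outside (assumes the `DBPtr` alias again; `EnsureIndex` is illustrative). Per the hunks above, CreateIndex itself pins the metric type chosen at collection creation and skips the update when the requested index matches the existing one:

```cpp
// Illustrative caller: inspect the current index, then request a rebuild.
Status EnsureIndex(const engine::DBPtr& db, const std::string& collection_id,
                   const TableIndex& desired) {
    TableIndex current;
    auto status = db->DescribeIndex(collection_id, current);
    if (!status.ok()) {
        return status;
    }
    return db->CreateIndex(collection_id, desired);  // no-op if unchanged
}
```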
@@ -1109,20 +1109,20 @@ DBImpl::Query(const std::shared_ptr<server::Context>& context, const std::string
     }
 
     Status status;
-    meta::TableFilesSchema files_array;
+    meta::SegmentsSchema files_array;
 
     if (partition_tags.empty()) {
-        // no partition tag specified, means search in whole table
-        // get all table files from parent table
-        status = GetFilesToSearch(table_id, files_array);
+        // no partition tag specified, means search in whole collection
+        // get all collection files from parent collection
+        status = GetFilesToSearch(collection_id, files_array);
         if (!status.ok()) {
             return status;
         }
 
-        std::vector<meta::TableSchema> partition_array;
-        status = meta_ptr_->ShowPartitions(table_id, partition_array);
+        std::vector<meta::CollectionSchema> partition_array;
+        status = meta_ptr_->ShowPartitions(collection_id, partition_array);
         for (auto& schema : partition_array) {
-            status = GetFilesToSearch(schema.table_id_, files_array);
+            status = GetFilesToSearch(schema.collection_id_, files_array);
         }
 
         if (files_array.empty()) {
@@ -1131,7 +1131,7 @@ DBImpl::Query(const std::shared_ptr<server::Context>& context, const std::string
     } else {
         // get files from specified partitions
         std::set<std::string> partition_name_array;
-        status = GetPartitionsByTags(table_id, partition_tags, partition_name_array);
+        status = GetPartitionsByTags(collection_id, partition_tags, partition_name_array);
         if (!status.ok()) {
             return status;  // didn't match any partition.
         }
@@ -1169,7 +1169,7 @@ DBImpl::QueryByFileID(const std::shared_ptr<server::Context>& context, const std
         ids.push_back(std::stoul(id, &sz));
     }
 
-    meta::TableFilesSchema search_files;
+    meta::SegmentsSchema search_files;
     auto status = meta_ptr_->FilesByID(ids, search_files);
     if (!status.ok()) {
         return status;
@@ -1200,7 +1200,7 @@ DBImpl::Size(uint64_t& result) {
 // internal methods
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 Status
-DBImpl::QueryAsync(const std::shared_ptr<server::Context>& context, const meta::TableFilesSchema& files, uint64_t k,
+DBImpl::QueryAsync(const std::shared_ptr<server::Context>& context, const meta::SegmentsSchema& files, uint64_t k,
                    const milvus::json& extra_params, const VectorsData& vectors, ResultIds& result_ids,
                    ResultDistances& result_distances) {
     milvus::server::ContextChild tracer(context, "Query Async");
@@ -1214,7 +1214,7 @@ DBImpl::QueryAsync(const std::shared_ptr<server::Context>& context, const meta::
     ENGINE_LOG_DEBUG << "Engine query begin, index file count: " << files.size();
     scheduler::SearchJobPtr job = std::make_shared<scheduler::SearchJob>(tracer.Context(), k, extra_params, vectors);
     for (auto& file : files) {
-        scheduler::TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
+        scheduler::SegmentSchemaPtr file_ptr = std::make_shared<meta::SegmentSchema>(file);
         job->AddIndexFile(file_ptr);
     }
 
@@ -1347,10 +1347,10 @@ DBImpl::StartMergeTask() {
     // 1. other tables may still has un-merged files
    // 2. server may be closed unexpected, these un-merge files need to be merged when server restart
     if (merge_table_ids_.empty()) {
-        std::vector<meta::TableSchema> table_schema_array;
+        std::vector<meta::CollectionSchema> table_schema_array;
         meta_ptr_->AllTables(table_schema_array);
         for (auto& schema : table_schema_array) {
-            merge_table_ids_.insert(schema.table_id_);
+            merge_table_ids_.insert(schema.collection_id_);
         }
     }
 
@@ -1365,19 +1365,19 @@ DBImpl::StartMergeTask() {
 }
 
 Status
-DBImpl::MergeFiles(const std::string& table_id, const meta::TableFilesSchema& files) {
+DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema& files) {
     //    const std::lock_guard<std::mutex> lock(flush_merge_compact_mutex_);
 
-    ENGINE_LOG_DEBUG << "Merge files for table: " << table_id;
+    ENGINE_LOG_DEBUG << "Merge files for collection: " << collection_id;
 
-    // step 1: create table file
-    meta::TableFileSchema table_file;
-    table_file.table_id_ = table_id;
-    table_file.file_type_ = meta::TableFileSchema::NEW_MERGE;
+    // step 1: create collection file
+    meta::SegmentSchema table_file;
+    table_file.collection_id_ = collection_id;
+    table_file.file_type_ = meta::SegmentSchema::NEW_MERGE;
     Status status = meta_ptr_->CreateTableFile(table_file);
 
     if (!status.ok()) {
-        ENGINE_LOG_ERROR << "Failed to create table: " << status.ToString();
+        ENGINE_LOG_ERROR << "Failed to create collection: " << status.ToString();
         return status;
     }
 
@@ -1387,7 +1387,7 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::TableFilesSchema& fi
         EngineFactory::Build(table_file.dimension_, table_file.location_, (EngineType)table_file.engine_type_,
                              (MetricType)table_file.metric_type_, table_file.nlist_);
     */
-    meta::TableFilesSchema updated;
+    meta::SegmentsSchema updated;
 
     std::string new_segment_dir;
     utils::GetParentPath(table_file.location_, new_segment_dir);
@@ -1399,7 +1399,7 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::TableFilesSchema& fi
         utils::GetParentPath(file.location_, segment_dir_to_merge);
         segment_writer_ptr->Merge(segment_dir_to_merge, table_file.file_id_);
         auto file_schema = file;
-        file_schema.file_type_ = meta::TableFileSchema::TO_DELETE;
+        file_schema.file_type_ = meta::SegmentSchema::TO_DELETE;
         updated.push_back(file_schema);
         auto size = segment_writer_ptr->Size();
         if (size >= file_schema.index_file_size_) {
@@ -1423,22 +1423,22 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::TableFilesSchema& fi
 
         // if failed to serialize merge file to disk
         // typical error: out of disk space, out of memory or permission denied
-        table_file.file_type_ = meta::TableFileSchema::TO_DELETE;
+        table_file.file_type_ = meta::SegmentSchema::TO_DELETE;
         status = meta_ptr_->UpdateTableFile(table_file);
         ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";
 
         return status;
     }
 
-    // step 4: update table files state
+    // step 4: update collection files state
     // if index type isn't IDMAP, set file type to TO_INDEX if file size exceed index_file_size
     // else set file type to RAW, no need to build index
     if (!utils::IsRawIndexType(table_file.engine_type_)) {
         table_file.file_type_ = (segment_writer_ptr->Size() >= table_file.index_file_size_)
-                                    ? meta::TableFileSchema::TO_INDEX
-                                    : meta::TableFileSchema::RAW;
+                                    ? meta::SegmentSchema::TO_INDEX
+                                    : meta::SegmentSchema::RAW;
     } else {
-        table_file.file_type_ = meta::TableFileSchema::RAW;
+        table_file.file_type_ = meta::SegmentSchema::RAW;
     }
     table_file.file_size_ = segment_writer_ptr->Size();
     table_file.row_count_ = segment_writer_ptr->VectorCount();
@@ -1455,13 +1455,13 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::TableFilesSchema& fi
 }
 
 Status
-DBImpl::BackgroundMergeFiles(const std::string& table_id) {
+DBImpl::BackgroundMergeFiles(const std::string& collection_id) {
     const std::lock_guard<std::mutex> lock(flush_merge_compact_mutex_);
 
-    meta::TableFilesSchema raw_files;
-    auto status = meta_ptr_->FilesToMerge(table_id, raw_files);
+    meta::SegmentsSchema raw_files;
+    auto status = meta_ptr_->FilesToMerge(collection_id, raw_files);
     if (!status.ok()) {
-        ENGINE_LOG_ERROR << "Failed to get merge files for table: " << table_id;
+        ENGINE_LOG_ERROR << "Failed to get merge files for collection: " << collection_id;
         return status;
     }
 
@@ -1471,11 +1471,11 @@ DBImpl::BackgroundMergeFiles(const std::string& table_id) {
     }
 
     status = OngoingFileChecker::GetInstance().MarkOngoingFiles(raw_files);
-    MergeFiles(table_id, raw_files);
+    MergeFiles(collection_id, raw_files);
     status = OngoingFileChecker::GetInstance().UnmarkOngoingFiles(raw_files);
 
     if (!initialized_.load(std::memory_order_acquire)) {
-        ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action for table: " << table_id;
+        ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action for collection: " << collection_id;
     }
 
     return Status::OK();
@@ -1486,10 +1486,10 @@ DBImpl::BackgroundMerge(std::set<std::string> table_ids) {
     //    ENGINE_LOG_TRACE << " Background merge thread start";
 
     Status status;
-    for (auto& table_id : table_ids) {
-        status = BackgroundMergeFiles(table_id);
+    for (auto& collection_id : table_ids) {
+        status = BackgroundMergeFiles(collection_id);
         if (!status.ok()) {
-            ENGINE_LOG_ERROR << "Merge files for table " << table_id << " failed: " << status.ToString();
+            ENGINE_LOG_ERROR << "Merge files for collection " << collection_id << " failed: " << status.ToString();
         }
 
         if (!initialized_.load(std::memory_order_acquire)) {
@@ -1543,7 +1543,7 @@ DBImpl::StartBuildIndexTask(bool force) {
 void
 DBImpl::BackgroundBuildIndex() {
     std::unique_lock<std::mutex> lock(build_index_mutex_);
-    meta::TableFilesSchema to_index_files;
+    meta::SegmentsSchema to_index_files;
     meta_ptr_->FilesToIndex(to_index_files);
     Status status = index_failed_checker_.IgnoreFailedIndexFiles(to_index_files);
 
@@ -1552,10 +1552,10 @@ DBImpl::BackgroundBuildIndex() {
         status = OngoingFileChecker::GetInstance().MarkOngoingFiles(to_index_files);
 
         // step 2: put build index task to scheduler
-        std::vector<std::pair<scheduler::BuildIndexJobPtr, scheduler::TableFileSchemaPtr>> job2file_map;
+        std::vector<std::pair<scheduler::BuildIndexJobPtr, scheduler::SegmentSchemaPtr>> job2file_map;
         for (auto& file : to_index_files) {
             scheduler::BuildIndexJobPtr job = std::make_shared<scheduler::BuildIndexJob>(meta_ptr_, options_);
-            scheduler::TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
+            scheduler::SegmentSchemaPtr file_ptr = std::make_shared<meta::SegmentSchema>(file);
             job->AddToIndexFiles(file_ptr);
             scheduler::JobMgrInst::GetInstance()->Put(job);
             job2file_map.push_back(std::make_pair(job, file_ptr));
@@ -1564,7 +1564,7 @@ DBImpl::BackgroundBuildIndex() {
         // step 3: wait build index finished and mark failed files
         for (auto iter = job2file_map.begin(); iter != job2file_map.end(); ++iter) {
             scheduler::BuildIndexJobPtr job = iter->first;
-            meta::TableFileSchema& file_schema = *(iter->second.get());
+            meta::SegmentSchema& file_schema = *(iter->second.get());
             job->WaitBuildIndexFinish();
             if (!job->GetStatus().ok()) {
                 Status status = job->GetStatus();
@@ -1584,14 +1584,14 @@
 }
 
 Status
-DBImpl::GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>& file_types,
-                             meta::TableFilesSchema& files) {
+DBImpl::GetFilesToBuildIndex(const std::string& collection_id, const std::vector<int>& file_types,
+                             meta::SegmentsSchema& files) {
     files.clear();
-    auto status = meta_ptr_->FilesByType(table_id, file_types, files);
+    auto status = meta_ptr_->FilesByType(collection_id, file_types, files);
 
     // only build index for files that row count greater than certain threshold
     for (auto it = files.begin(); it != files.end();) {
-        if ((*it).file_type_ == static_cast<int>(meta::TableFileSchema::RAW) &&
+        if ((*it).file_type_ == static_cast<int>(meta::SegmentSchema::RAW) &&
             (*it).row_count_ < meta::BUILD_INDEX_THRESHOLD) {
             it = files.erase(it);
         } else {
@@ -1603,11 +1603,11 @@ DBImpl::GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>
 }
 
 Status
-DBImpl::GetFilesToSearch(const std::string& table_id, meta::TableFilesSchema& files) {
-    ENGINE_LOG_DEBUG << "Collect files from table: " << table_id;
+DBImpl::GetFilesToSearch(const std::string& collection_id, meta::SegmentsSchema& files) {
+    ENGINE_LOG_DEBUG << "Collect files from collection: " << collection_id;
 
-    meta::TableFilesSchema search_files;
-    auto status = meta_ptr_->FilesToSearch(table_id, search_files);
+    meta::SegmentsSchema search_files;
+    auto status = meta_ptr_->FilesToSearch(collection_id, search_files);
     if (!status.ok()) {
         return status;
     }
@@ -1619,11 +1619,12 @@ DBImpl::GetFilesToSearch(const std::string& table_id, meta::TableFilesSchema& fi
 }
 
 Status
-DBImpl::GetPartitionByTag(const std::string& table_id, const std::string& partition_tag, std::string& partition_name) {
+DBImpl::GetPartitionByTag(const std::string& collection_id, const std::string& partition_tag,
+                          std::string& partition_name) {
     Status status;
 
     if (partition_tag.empty()) {
-        partition_name = table_id;
+        partition_name = collection_id;
 
     } else {
         // trim side-blank of tag, only compare valid characters
@@ -1632,11 +1633,11 @@ DBImpl::GetPartitionByTag(const std::string& table_id, const std::string& partit
         server::StringHelpFunctions::TrimStringBlank(valid_tag);
 
         if (valid_tag == milvus::engine::DEFAULT_PARTITON_TAG) {
-            partition_name = table_id;
+            partition_name = collection_id;
             return status;
         }
 
-        status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name);
+        status = meta_ptr_->GetPartitionName(collection_id, partition_tag, partition_name);
         if (!status.ok()) {
             ENGINE_LOG_ERROR << status.message();
         }
@@ -1646,10 +1647,10 @@ DBImpl::GetPartitionByTag(const std::string& table_id, const std::string& partit
 }
 
 Status
-DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
+DBImpl::GetPartitionsByTags(const std::string& collection_id, const std::vector<std::string>& partition_tags,
                             std::set<std::string>& partition_name_array) {
-    std::vector<meta::TableSchema> partition_array;
-    auto status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<meta::CollectionSchema> partition_array;
+    auto status = meta_ptr_->ShowPartitions(collection_id, partition_array);
 
     for (auto& tag : partition_tags) {
         // trim side-blank of tag, only compare valid characters
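Reviewer note: GetPartitionByTag (just above) treats an empty or default tag as the parent collection itself. A standalone approximation of that normalization (the real code delegates trimming to server::StringHelpFunctions::TrimStringBlank, and the default tag's literal value is assumed):

```cpp
#include <string>

// Returns true when the tag, after trimming side blanks, addresses the
// parent collection rather than a named partition.
bool ResolvesToParentCollection(std::string tag) {
    const std::string kDefaultTag = "_default";  // assumed value of DEFAULT_PARTITON_TAG
    tag.erase(0, tag.find_first_not_of(" \t"));
    tag.erase(tag.find_last_not_of(" \t") + 1);
    return tag.empty() || tag == kDefaultTag;
}
```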
@@ -1658,13 +1659,13 @@ DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector<std::
         std::string valid_tag = tag;
         server::StringHelpFunctions::TrimStringBlank(valid_tag);
         for (auto& schema : partition_array) {
             if (schema.partition_tag_ == valid_tag) {
-                partition_name_array.insert(schema.table_id_);
+                partition_name_array.insert(schema.collection_id_);
             }
         }
     }
 
     return Status::OK();
 }
 
 Status
-DBImpl::DropTableRecursively(const std::string& table_id) {
+DBImpl::DropTableRecursively(const std::string& collection_id) {
     Status status;
 
     if (options_.wal_enable_) {
-        wal_mgr_->DropTable(table_id);
+        wal_mgr_->DropTable(collection_id);
     }
 
-    status = mem_mgr_->EraseMemVector(table_id);  // not allow insert
-    status = meta_ptr_->DropTable(table_id);      // soft delete table
-    index_failed_checker_.CleanFailedIndexFileOfTable(table_id);
+    status = mem_mgr_->EraseMemVector(collection_id);  // not allow insert
+    status = meta_ptr_->DropTable(collection_id);      // soft delete collection
+    index_failed_checker_.CleanFailedIndexFileOfTable(collection_id);
 
-    // scheduler will determine when to delete table files
+    // scheduler will determine when to delete collection files
     auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
-    scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
+    scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(collection_id, meta_ptr_, nres);
     scheduler::JobMgrInst::GetInstance()->Put(job);
     job->WaitAndDelete();
 
-    std::vector<meta::TableSchema> partition_array;
-    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<meta::CollectionSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(collection_id, partition_array);
     for (auto& schema : partition_array) {
-        status = DropTableRecursively(schema.table_id_);
+        status = DropTableRecursively(schema.collection_id_);
         fiu_do_on("DBImpl.DropTableRecursively.failed", status = Status(DB_ERROR, ""));
         if (!status.ok()) {
             return status;
@@ -1710,21 +1711,21 @@ DBImpl::DropTableRecursively(const std::string& table_id) {
 }
 
 Status
-DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
-    DropIndex(table_id);
+DBImpl::UpdateTableIndexRecursively(const std::string& collection_id, const TableIndex& index) {
+    DropIndex(collection_id);
 
-    auto status = meta_ptr_->UpdateTableIndex(table_id, index);
+    auto status = meta_ptr_->UpdateTableIndex(collection_id, index);
     fiu_do_on("DBImpl.UpdateTableIndexRecursively.fail_update_table_index",
               status = Status(DB_META_TRANSACTION_FAILED, ""));
     if (!status.ok()) {
-        ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
+        ENGINE_LOG_ERROR << "Failed to update collection index info for collection: " << collection_id;
         return status;
     }
 
-    std::vector<meta::TableSchema> partition_array;
-    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<meta::CollectionSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(collection_id, partition_array);
     for (auto& schema : partition_array) {
-        status = UpdateTableIndexRecursively(schema.table_id_, index);
+        status = UpdateTableIndexRecursively(schema.collection_id_, index);
         if (!status.ok()) {
             return status;
         }
@@ -1734,48 +1735,46 @@ DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableInde
 }
 
 Status
-DBImpl::WaitTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
+DBImpl::WaitTableIndexRecursively(const std::string& collection_id, const TableIndex& index) {
     // for IDMAP type, only wait all NEW file converted to RAW file
     // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
     std::vector<int> file_types;
     if (utils::IsRawIndexType(index.engine_type_)) {
         file_types = {
-            static_cast<int32_t>(meta::TableFileSchema::NEW),
-            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
+            static_cast<int32_t>(meta::SegmentSchema::NEW),
+            static_cast<int32_t>(meta::SegmentSchema::NEW_MERGE),
         };
     } else {
         file_types = {
-            static_cast<int32_t>(meta::TableFileSchema::RAW),
-            static_cast<int32_t>(meta::TableFileSchema::NEW),
-            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
-            static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
-            static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
+            static_cast<int32_t>(meta::SegmentSchema::RAW),       static_cast<int32_t>(meta::SegmentSchema::NEW),
+            static_cast<int32_t>(meta::SegmentSchema::NEW_MERGE), static_cast<int32_t>(meta::SegmentSchema::NEW_INDEX),
+            static_cast<int32_t>(meta::SegmentSchema::TO_INDEX),
        };
     }
 
     // get files to build index
-    meta::TableFilesSchema table_files;
-    auto status = GetFilesToBuildIndex(table_id, file_types, table_files);
+    meta::SegmentsSchema table_files;
+    auto status = GetFilesToBuildIndex(collection_id, file_types, table_files);
     int times = 1;
 
     while (!table_files.empty()) {
         ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
         if (!utils::IsRawIndexType(index.engine_type_)) {
-            status = meta_ptr_->UpdateTableFilesToIndex(table_id);
+            status = meta_ptr_->UpdateTableFilesToIndex(collection_id);
         }
 
         std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
-        GetFilesToBuildIndex(table_id, file_types, table_files);
+        GetFilesToBuildIndex(collection_id, file_types, table_files);
         ++times;
 
         index_failed_checker_.IgnoreFailedIndexFiles(table_files);
     }
 
     // build index for partition
-    std::vector<meta::TableSchema> partition_array;
-    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    std::vector<meta::CollectionSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(collection_id, partition_array);
     for (auto& schema : partition_array) {
-        status = WaitTableIndexRecursively(schema.table_id_, index);
+        status = WaitTableIndexRecursively(schema.collection_id_, index);
         fiu_do_on("DBImpl.WaitTableIndexRecursively.fail_build_table_Index_for_partition",
                   status = Status(DB_ERROR, ""));
         if (!status.ok()) {
@@ -1785,7 +1784,7 @@ DBImpl::WaitTableIndexRecursively(const std::string& table_id, const TableIndex&
 
     // failed to build index for some files, return error
     std::string err_msg;
-    index_failed_checker_.GetErrMsgForTable(table_id, err_msg);
+    index_failed_checker_.GetErrMsgForTable(collection_id, err_msg);
     fiu_do_on("DBImpl.WaitTableIndexRecursively.not_empty_err_msg", err_msg.append("fiu"));
     if (!err_msg.empty()) {
         return Status(DB_ERROR, err_msg);
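Reviewer note: the wait loop above polls with a linearly growing, capped sleep. Extracted as a standalone helper for clarity (behavior copied from the `std::min(10 * 1000, times * 100)` expression in the source):

```cpp
#include <algorithm>
#include <chrono>
#include <thread>

// Poll interval grows 100 ms per round and is capped at 10 s.
void BackoffSleep(int times) {
    auto ms = std::min(10 * 1000, times * 100);
    std::this_thread::sleep_for(std::chrono::milliseconds(ms));
}
```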
partition_array; + status = meta_ptr_->ShowPartitions(collection_id, partition_array); for (auto& schema : partition_array) { uint64_t partition_row_count = 0; - status = GetTableRowCountRecursively(schema.table_id_, partition_row_count); + status = GetTableRowCountRecursively(schema.collection_id_, partition_row_count); fiu_do_on("DBImpl.GetTableRowCountRecursively.fail_get_table_rowcount_for_partition", status = Status(DB_ERROR, "")); if (!status.ok()) { @@ -1855,10 +1854,10 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) { uint64_t max_lsn = 0; if (options_.wal_enable_) { - for (auto& table : table_ids) { + for (auto& collection : table_ids) { uint64_t lsn = 0; - meta_ptr_->GetTableFlushLSN(table, lsn); - wal_mgr_->TableFlushed(table, lsn); + meta_ptr_->GetTableFlushLSN(collection, lsn); + wal_mgr_->TableFlushed(collection, lsn); if (lsn > max_lsn) { max_lsn = lsn; } @@ -1866,8 +1865,8 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) { } std::lock_guard<std::mutex> lck(merge_result_mutex_); - for (auto& table : table_ids) { - merge_table_ids_.insert(table); + for (auto& collection : table_ids) { + merge_table_ids_.insert(collection); } return max_lsn; }; @@ -1877,7 +1876,7 @@ switch (record.type) { case wal::MXLogType::InsertBinary: { std::string target_table_name; - status = GetPartitionByTag(record.table_id, record.partition_tag, target_table_name); + status = GetPartitionByTag(record.collection_id, record.partition_tag, target_table_name); if (!status.ok()) { return status; } @@ -1896,7 +1895,7 @@ case wal::MXLogType::InsertVector: { std::string target_table_name; - status = GetPartitionByTag(record.table_id, record.partition_tag, target_table_name); + status = GetPartitionByTag(record.collection_id, record.partition_tag, target_table_name); if (!status.ok()) { return status; } @@ -1914,28 +1913,28 @@ } case wal::MXLogType::Delete: { - std::vector<meta::TableSchema> partition_array; - status = meta_ptr_->ShowPartitions(record.table_id, partition_array); + std::vector<meta::CollectionSchema> partition_array; + status = meta_ptr_->ShowPartitions(record.collection_id, partition_array); if (!status.ok()) { return status; } - std::vector<std::string> table_ids{record.table_id}; + std::vector<std::string> table_ids{record.collection_id}; for (auto& partition : partition_array) { - auto& partition_table_id = partition.table_id_; + auto& partition_table_id = partition.collection_id_; table_ids.emplace_back(partition_table_id); } if (record.length == 1) { - for (auto& table_id : table_ids) { - status = mem_mgr_->DeleteVector(table_id, *record.ids, record.lsn); + for (auto& collection_id : table_ids) { + status = mem_mgr_->DeleteVector(collection_id, *record.ids, record.lsn); if (!status.ok()) { return status; } } } else { - for (auto& table_id : table_ids) { - status = mem_mgr_->DeleteVectors(table_id, record.length, record.ids, record.lsn); + for (auto& collection_id : table_ids) { + status = mem_mgr_->DeleteVectors(collection_id, record.length, record.ids, record.lsn); if (!status.ok()) { return status; } @@ -1945,28 +1944,28 @@ } case wal::MXLogType::Flush: { - if (!record.table_id.empty()) { - // flush one table - std::vector<meta::TableSchema> partition_array; - status = meta_ptr_->ShowPartitions(record.table_id, partition_array); + if (!record.collection_id.empty()) { + // flush one collection + std::vector<meta::CollectionSchema> partition_array; + status =
meta_ptr_->ShowPartitions(record.collection_id, partition_array); if (!status.ok()) { return status; } - std::vector<std::string> table_ids{record.table_id}; + std::vector<std::string> table_ids{record.collection_id}; for (auto& partition : partition_array) { - auto& partition_table_id = partition.table_id_; + auto& partition_table_id = partition.collection_id_; table_ids.emplace_back(partition_table_id); } std::set<std::string> flushed_tables; - for (auto& table_id : table_ids) { + for (auto& collection_id : table_ids) { const std::lock_guard<std::mutex> lock(flush_merge_compact_mutex_); - status = mem_mgr_->Flush(table_id); + status = mem_mgr_->Flush(collection_id); if (!status.ok()) { break; } - flushed_tables.insert(table_id); + flushed_tables.insert(collection_id); } tables_flushed(flushed_tables); @@ -2007,7 +2006,7 @@ DBImpl::BackgroundWalTask() { auto auto_flush = [&]() { record.type = wal::MXLogType::Flush; - record.table_id.clear(); + record.collection_id.clear(); ExecWalRecord(record); StartMetricTask(); @@ -2036,7 +2035,7 @@ flush_task_swn_.Notify(); // if user flush all manually, update auto flush also - if (record.table_id.empty() && options_.auto_flush_interval_ > 0) { + if (record.collection_id.empty() && options_.auto_flush_interval_ > 0) { next_auto_flush_time = get_next_auto_flush_time(); } } diff --git a/core/src/db/DBImpl.h b/core/src/db/DBImpl.h index d101a84456..a4d19d3876 100644 --- a/core/src/db/DBImpl.h +++ b/core/src/db/DBImpl.h @@ -55,91 +55,92 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi DropAll() override; Status - CreateTable(meta::TableSchema& table_schema) override; + CreateTable(meta::CollectionSchema& table_schema) override; Status - DropTable(const std::string& table_id) override; + DropTable(const std::string& collection_id) override; Status - DescribeTable(meta::TableSchema& table_schema) override; + DescribeTable(meta::CollectionSchema& table_schema) override; Status - HasTable(const std::string& table_id, bool& has_or_not) override; + HasTable(const std::string& collection_id, bool& has_or_not) override; Status - HasNativeTable(const std::string& table_id, bool& has_or_not_) override; + HasNativeTable(const std::string& collection_id, bool& has_or_not_) override; Status - AllTables(std::vector<meta::TableSchema>& table_schema_array) override; + AllTables(std::vector<meta::CollectionSchema>& table_schema_array) override; Status - GetTableInfo(const std::string& table_id, TableInfo& table_info) override; + GetTableInfo(const std::string& collection_id, TableInfo& table_info) override; Status - PreloadTable(const std::string& table_id) override; + PreloadTable(const std::string& collection_id) override; Status - UpdateTableFlag(const std::string& table_id, int64_t flag) override; + UpdateTableFlag(const std::string& collection_id, int64_t flag) override; Status - GetTableRowCount(const std::string& table_id, uint64_t& row_count) override; + GetTableRowCount(const std::string& collection_id, uint64_t& row_count) override; Status - CreatePartition(const std::string& table_id, const std::string& partition_name, + CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& partition_tag) override; Status DropPartition(const std::string& partition_name) override; Status - DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override; + DropPartitionByTag(const std::string& collection_id, const std::string& partition_tag) override; Status - ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>&
partition_schema_array) override; + ShowPartitions(const std::string& collection_id, + std::vector<meta::CollectionSchema>& partition_schema_array) override; Status - InsertVectors(const std::string& table_id, const std::string& partition_tag, VectorsData& vectors) override; + InsertVectors(const std::string& collection_id, const std::string& partition_tag, VectorsData& vectors) override; Status - DeleteVector(const std::string& table_id, IDNumber vector_id) override; + DeleteVector(const std::string& collection_id, IDNumber vector_id) override; Status - DeleteVectors(const std::string& table_id, IDNumbers vector_ids) override; + DeleteVectors(const std::string& collection_id, IDNumbers vector_ids) override; Status - Flush(const std::string& table_id) override; + Flush(const std::string& collection_id) override; Status Flush() override; Status - Compact(const std::string& table_id) override; + Compact(const std::string& collection_id) override; Status - GetVectorByID(const std::string& table_id, const IDNumber& vector_id, VectorsData& vector) override; + GetVectorByID(const std::string& collection_id, const IDNumber& vector_id, VectorsData& vector) override; Status - GetVectorIDs(const std::string& table_id, const std::string& segment_id, IDNumbers& vector_ids) override; + GetVectorIDs(const std::string& collection_id, const std::string& segment_id, IDNumbers& vector_ids) override; // Status // Merge(const std::set<std::string>& table_ids) override; Status - CreateIndex(const std::string& table_id, const TableIndex& index) override; + CreateIndex(const std::string& collection_id, const TableIndex& index) override; Status - DescribeIndex(const std::string& table_id, TableIndex& index) override; + DescribeIndex(const std::string& collection_id, TableIndex& index) override; Status - DropIndex(const std::string& table_id) override; + DropIndex(const std::string& collection_id) override; Status - QueryByID(const std::shared_ptr<server::Context>& context, const std::string& table_id, + QueryByID(const std::shared_ptr<server::Context>& context, const std::string& collection_id, const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params, IDNumber vector_id, ResultIds& result_ids, ResultDistances& result_distances) override; Status - Query(const std::shared_ptr<server::Context>& context, const std::string& table_id, + Query(const std::shared_ptr<server::Context>& context, const std::string& collection_id, const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params, const VectorsData& vectors, ResultIds& result_ids, ResultDistances& result_distances) override; @@ -160,13 +161,13 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi private: Status - QueryAsync(const std::shared_ptr<server::Context>& context, const meta::TableFilesSchema& files, uint64_t k, + QueryAsync(const std::shared_ptr<server::Context>& context, const meta::SegmentsSchema& files, uint64_t k, const milvus::json& extra_params, const VectorsData& vectors, ResultIds& result_ids, ResultDistances& result_distances); Status - GetVectorByIdHelper(const std::string& table_id, IDNumber vector_id, VectorsData& vector, - const meta::TableFilesSchema& files); + GetVectorByIdHelper(const std::string& collection_id, IDNumber vector_id, VectorsData& vector, + const meta::SegmentsSchema& files); void BackgroundTimerTask(); @@ -184,10 +185,10 @@ StartMergeTask(); Status - MergeFiles(const std::string& table_id, const meta::TableFilesSchema& files); + MergeFiles(const std::string& collection_id, const meta::SegmentsSchema& files);
Status - BackgroundMergeFiles(const std::string& table_id); + BackgroundMergeFiles(const std::string& collection_id); void BackgroundMerge(std::set<std::string> table_ids); @@ -199,8 +200,8 @@ BackgroundBuildIndex(); Status - CompactFile(const std::string& table_id, const meta::TableFileSchema& file, - meta::TableFilesSchema& files_to_update); + CompactFile(const std::string& collection_id, const meta::SegmentSchema& file, + meta::SegmentsSchema& files_to_update); /* Status @@ -208,33 +209,33 @@ */ Status - GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>& file_types, - meta::TableFilesSchema& files); + GetFilesToBuildIndex(const std::string& collection_id, const std::vector<int>& file_types, + meta::SegmentsSchema& files); Status - GetFilesToSearch(const std::string& table_id, meta::TableFilesSchema& files); + GetFilesToSearch(const std::string& collection_id, meta::SegmentsSchema& files); Status - GetPartitionByTag(const std::string& table_id, const std::string& partition_tag, std::string& partition_name); + GetPartitionByTag(const std::string& collection_id, const std::string& partition_tag, std::string& partition_name); Status - GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags, + GetPartitionsByTags(const std::string& collection_id, const std::vector<std::string>& partition_tags, std::set<std::string>& partition_name_array); Status - DropTableRecursively(const std::string& table_id); + DropTableRecursively(const std::string& collection_id); Status - UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index); + UpdateTableIndexRecursively(const std::string& collection_id, const TableIndex& index); Status - WaitTableIndexRecursively(const std::string& table_id, const TableIndex& index); + WaitTableIndexRecursively(const std::string& collection_id, const TableIndex& index); Status - DropTableIndexRecursively(const std::string& table_id); + DropTableIndexRecursively(const std::string& collection_id); Status - GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count); + GetTableRowCountRecursively(const std::string& collection_id, uint64_t& row_count); Status ExecWalRecord(const wal::MXLogRecord& record);
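The *Recursively declarations above all share one traversal: run an operation on the collection itself, then ShowPartitions and recurse into each partition, since each partition is itself stored as a collection. A hedged sketch of that shape, with ListPartitionsFn and OpFn as hypothetical stand-ins for the meta calls:

    #include <functional>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the meta layer used in this sketch.
    struct PartitionInfo { std::string collection_id_; };
    using ListPartitionsFn = std::function<std::vector<PartitionInfo>(const std::string&)>;
    using OpFn = std::function<bool(const std::string&)>;

    // Apply op to the collection, then recurse into every partition.
    bool ApplyRecursively(const std::string& collection_id,
                          const ListPartitionsFn& list_partitions, const OpFn& op) {
        if (!op(collection_id)) {
            return false;
        }
        for (const auto& partition : list_partitions(collection_id)) {
            if (!ApplyRecursively(partition.collection_id_, list_partitions, op)) {
                return false;
            }
        }
        return true;
    }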
diff --git a/core/src/db/IndexFailedChecker.cpp b/core/src/db/IndexFailedChecker.cpp index eef913fbd8..a6cf982cac 100644 --- a/core/src/db/IndexFailedChecker.cpp +++ b/core/src/db/IndexFailedChecker.cpp @@ -20,17 +20,17 @@ namespace engine { constexpr uint64_t INDEX_FAILED_RETRY_TIME = 1; Status -IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& table_id) { +IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& collection_id) { std::lock_guard<std::mutex> lck(mutex_); - index_failed_files_.erase(table_id); // rebuild failed index files for this table + index_failed_files_.erase(collection_id); // rebuild failed index files for this collection return Status::OK(); } Status -IndexFailedChecker::GetErrMsgForTable(const std::string& table_id, std::string& err_msg) { +IndexFailedChecker::GetErrMsgForTable(const std::string& collection_id, std::string& err_msg) { std::lock_guard<std::mutex> lck(mutex_); - auto iter = index_failed_files_.find(table_id); + auto iter = index_failed_files_.find(collection_id); if (iter != index_failed_files_.end()) { err_msg = iter->second.begin()->second[0]; } @@ -39,14 +39,14 @@ IndexFailedChecker::GetErrMsgForTable(const std::string& table_id, std::string& } Status -IndexFailedChecker::MarkFailedIndexFile(const meta::TableFileSchema& file, const std::string& err_msg) { +IndexFailedChecker::MarkFailedIndexFile(const meta::SegmentSchema& file, const std::string& err_msg) { std::lock_guard<std::mutex> lck(mutex_); - auto iter = index_failed_files_.find(file.table_id_); + auto iter = index_failed_files_.find(file.collection_id_); if (iter == index_failed_files_.end()) { File2ErrArray failed_files; failed_files.insert(std::make_pair(file.file_id_, std::vector<std::string>(1, err_msg))); - index_failed_files_.insert(std::make_pair(file.table_id_, failed_files)); + index_failed_files_.insert(std::make_pair(file.collection_id_, failed_files)); } else { auto it_failed_files = iter->second.find(file.file_id_); if (it_failed_files != iter->second.end()) { @@ -60,14 +60,14 @@ IndexFailedChecker::MarkFailedIndexFile(const meta::TableFileSchema& file, const } Status -IndexFailedChecker::MarkSucceedIndexFile(const meta::TableFileSchema& file) { +IndexFailedChecker::MarkSucceedIndexFile(const meta::SegmentSchema& file) { std::lock_guard<std::mutex> lck(mutex_); - auto iter = index_failed_files_.find(file.table_id_); + auto iter = index_failed_files_.find(file.collection_id_); if (iter != index_failed_files_.end()) { iter->second.erase(file.file_id_); if (iter->second.empty()) { - index_failed_files_.erase(file.table_id_); + index_failed_files_.erase(file.collection_id_); } } @@ -75,14 +75,14 @@ IndexFailedChecker::MarkSucceedIndexFile(const meta::TableFileSchema& file) { } Status -IndexFailedChecker::IgnoreFailedIndexFiles(meta::TableFilesSchema& table_files) { +IndexFailedChecker::IgnoreFailedIndexFiles(meta::SegmentsSchema& table_files) { std::lock_guard<std::mutex> lck(mutex_); - // there could be some failed files belong to different table. + // there could be some failed files belonging to different collections. // some files may have failed several times; no need to build index for these files.
// thus we can avoid an endless loop in the build-index operation for (auto it_file = table_files.begin(); it_file != table_files.end();) { - auto it_failed_files = index_failed_files_.find((*it_file).table_id_); + auto it_failed_files = index_failed_files_.find((*it_file).collection_id_); if (it_failed_files != index_failed_files_.end()) { auto it_failed_file = it_failed_files->second.find((*it_file).file_id_); if (it_failed_file != it_failed_files->second.end()) { diff --git a/core/src/db/IndexFailedChecker.h b/core/src/db/IndexFailedChecker.h index 277611a2dc..8c43425801 100644 --- a/core/src/db/IndexFailedChecker.h +++ b/core/src/db/IndexFailedChecker.h @@ -25,23 +25,23 @@ namespace engine { class IndexFailedChecker { public: Status - CleanFailedIndexFileOfTable(const std::string& table_id); + CleanFailedIndexFileOfTable(const std::string& collection_id); Status - GetErrMsgForTable(const std::string& table_id, std::string& err_msg); + GetErrMsgForTable(const std::string& collection_id, std::string& err_msg); Status - MarkFailedIndexFile(const meta::TableFileSchema& file, const std::string& err_msg); + MarkFailedIndexFile(const meta::SegmentSchema& file, const std::string& err_msg); Status - MarkSucceedIndexFile(const meta::TableFileSchema& file); + MarkSucceedIndexFile(const meta::SegmentSchema& file); Status - IgnoreFailedIndexFiles(meta::TableFilesSchema& table_files); + IgnoreFailedIndexFiles(meta::SegmentsSchema& table_files); private: std::mutex mutex_; - Table2FileErr index_failed_files_; // table id mapping to (file id mapping to failed times) + Table2FileErr index_failed_files_; // collection id mapping to (file id mapping to failed times) }; } // namespace engine diff --git a/core/src/db/OngoingFileChecker.cpp b/core/src/db/OngoingFileChecker.cpp index 7341c8ce83..b4a19f7f7f 100644 --- a/core/src/db/OngoingFileChecker.cpp +++ b/core/src/db/OngoingFileChecker.cpp @@ -24,13 +24,13 @@ OngoingFileChecker::GetInstance() { } Status -OngoingFileChecker::MarkOngoingFile(const meta::TableFileSchema& table_file) { +OngoingFileChecker::MarkOngoingFile(const meta::SegmentSchema& table_file) { std::lock_guard<std::mutex> lck(mutex_); return MarkOngoingFileNoLock(table_file); } Status -OngoingFileChecker::MarkOngoingFiles(const meta::TableFilesSchema& table_files) { +OngoingFileChecker::MarkOngoingFiles(const meta::SegmentsSchema& table_files) { std::lock_guard<std::mutex> lck(mutex_); for (auto& table_file : table_files) { @@ -41,13 +41,13 @@ } Status -OngoingFileChecker::UnmarkOngoingFile(const meta::TableFileSchema& table_file) { +OngoingFileChecker::UnmarkOngoingFile(const meta::SegmentSchema& table_file) { std::lock_guard<std::mutex> lck(mutex_); return UnmarkOngoingFileNoLock(table_file); } Status -OngoingFileChecker::UnmarkOngoingFiles(const meta::TableFilesSchema& table_files) { +OngoingFileChecker::UnmarkOngoingFiles(const meta::SegmentsSchema& table_files) { std::lock_guard<std::mutex> lck(mutex_); for (auto& table_file : table_files) { @@ -58,10 +58,10 @@ } bool -OngoingFileChecker::IsIgnored(const meta::TableFileSchema& schema) { +OngoingFileChecker::IsIgnored(const meta::SegmentSchema& schema) { std::lock_guard<std::mutex> lck(mutex_); - auto iter = ongoing_files_.find(schema.table_id_); + auto iter = ongoing_files_.find(schema.collection_id_); if (iter == ongoing_files_.end()) { return false; } else { @@ -75,16 +75,16 @@ OngoingFileChecker::IsIgnored(const meta::TableFileSchema&
schema) { } Status -OngoingFileChecker::MarkOngoingFileNoLock(const meta::TableFileSchema& table_file) { - if (table_file.table_id_.empty() || table_file.file_id_.empty()) { - return Status(DB_ERROR, "Invalid table files"); +OngoingFileChecker::MarkOngoingFileNoLock(const meta::SegmentSchema& table_file) { + if (table_file.collection_id_.empty() || table_file.file_id_.empty()) { + return Status(DB_ERROR, "Invalid collection files"); } - auto iter = ongoing_files_.find(table_file.table_id_); + auto iter = ongoing_files_.find(table_file.collection_id_); if (iter == ongoing_files_.end()) { File2RefCount files_refcount; files_refcount.insert(std::make_pair(table_file.file_id_, 1)); - ongoing_files_.insert(std::make_pair(table_file.table_id_, files_refcount)); + ongoing_files_.insert(std::make_pair(table_file.collection_id_, files_refcount)); } else { auto it_file = iter->second.find(table_file.file_id_); if (it_file == iter->second.end()) { @@ -95,18 +95,18 @@ OngoingFileChecker::MarkOngoingFileNoLock(const meta::TableFileSchema& table_fil } ENGINE_LOG_DEBUG << "Mark ongoing file:" << table_file.file_id_ - << " refcount:" << ongoing_files_[table_file.table_id_][table_file.file_id_]; + << " refcount:" << ongoing_files_[table_file.collection_id_][table_file.file_id_]; return Status::OK(); } Status -OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_file) { - if (table_file.table_id_.empty() || table_file.file_id_.empty()) { - return Status(DB_ERROR, "Invalid table files"); +OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::SegmentSchema& table_file) { + if (table_file.collection_id_.empty() || table_file.file_id_.empty()) { + return Status(DB_ERROR, "Invalid collection files"); } - auto iter = ongoing_files_.find(table_file.table_id_); + auto iter = ongoing_files_.find(table_file.collection_id_); if (iter != ongoing_files_.end()) { auto it_file = iter->second.find(table_file.file_id_); if (it_file != iter->second.end()) { @@ -117,7 +117,7 @@ OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_f if (it_file->second <= 0) { iter->second.erase(table_file.file_id_); if (iter->second.empty()) { - ongoing_files_.erase(table_file.table_id_); + ongoing_files_.erase(table_file.collection_id_); } } }
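MarkOngoingFileNoLock and UnmarkOngoingFileNoLock above maintain a two-level reference count, collection id to (file id to ref-count), erasing entries as counts drop to zero so the map never accumulates stale keys. A self-contained sketch of that bookkeeping, using plain std::map rather than the actual Table2FileRef alias (whose definition is not shown here):

    #include <map>
    #include <string>

    using File2RefCount = std::map<std::string, int>;
    using Collection2FileRef = std::map<std::string, File2RefCount>;

    void Mark(Collection2FileRef& ongoing, const std::string& collection_id, const std::string& file_id) {
        ++ongoing[collection_id][file_id];  // creates entries on first use
    }

    void Unmark(Collection2FileRef& ongoing, const std::string& collection_id, const std::string& file_id) {
        auto it = ongoing.find(collection_id);
        if (it == ongoing.end()) return;
        auto file_it = it->second.find(file_id);
        if (file_it == it->second.end()) return;
        if (--file_it->second <= 0) {
            it->second.erase(file_it);                   // drop the file entry at zero
            if (it->second.empty()) ongoing.erase(it);   // drop the collection entry when empty
        }
    }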
diff --git a/core/src/db/OngoingFileChecker.h b/core/src/db/OngoingFileChecker.h index 923d089a30..48832c17b8 100644 --- a/core/src/db/OngoingFileChecker.h +++ b/core/src/db/OngoingFileChecker.h @@ -29,30 +29,30 @@ class OngoingFileChecker { GetInstance(); Status - MarkOngoingFile(const meta::TableFileSchema& table_file); + MarkOngoingFile(const meta::SegmentSchema& table_file); Status - MarkOngoingFiles(const meta::TableFilesSchema& table_files); + MarkOngoingFiles(const meta::SegmentsSchema& table_files); Status - UnmarkOngoingFile(const meta::TableFileSchema& table_file); + UnmarkOngoingFile(const meta::SegmentSchema& table_file); Status - UnmarkOngoingFiles(const meta::TableFilesSchema& table_files); + UnmarkOngoingFiles(const meta::SegmentsSchema& table_files); bool - IsIgnored(const meta::TableFileSchema& schema); + IsIgnored(const meta::SegmentSchema& schema); private: Status - MarkOngoingFileNoLock(const meta::TableFileSchema& table_file); + MarkOngoingFileNoLock(const meta::SegmentSchema& table_file); Status - UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_file); + UnmarkOngoingFileNoLock(const meta::SegmentSchema& table_file); private: std::mutex mutex_; - Table2FileRef ongoing_files_; // table id mapping to (file id mapping to ongoing ref-count) + Table2FileRef ongoing_files_; // collection id mapping to (file id mapping to ongoing ref-count) }; } // namespace engine diff --git a/core/src/db/Utils.cpp b/core/src/db/Utils.cpp index 24fa29a49a..f714813884 100644 --- a/core/src/db/Utils.cpp +++ b/core/src/db/Utils.cpp @@ -36,19 +36,19 @@ uint64_t index_file_counter = 0; std::mutex index_file_counter_mutex; static std::string -ConstructParentFolder(const std::string& db_path, const meta::TableFileSchema& table_file) { - std::string table_path = db_path + TABLES_FOLDER + table_file.table_id_; +ConstructParentFolder(const std::string& db_path, const meta::SegmentSchema& table_file) { + std::string table_path = db_path + TABLES_FOLDER + table_file.collection_id_; std::string partition_path = table_path + "/" + table_file.segment_id_; return partition_path; } static std::string -GetTableFileParentFolder(const DBMetaOptions& options, const meta::TableFileSchema& table_file) { +GetTableFileParentFolder(const DBMetaOptions& options, const meta::SegmentSchema& table_file) { uint64_t path_count = options.slave_paths_.size() + 1; std::string target_path = options.path_; uint64_t index = 0; - if (meta::TableFileSchema::NEW_INDEX == table_file.file_type_) { + if (meta::SegmentSchema::NEW_INDEX == table_file.file_type_) { // index files are large and persisted permanently // we need to distribute index files evenly across the db_paths // round robin according to a file counter
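The round-robin comment above means: new index files are spread evenly across db_path and the slave paths through a shared counter guarded by a mutex. Roughly, under those assumptions (PickIndexFilePath is a hypothetical name, with static locals standing in for the file-scope counter shown earlier):

    #include <cstdint>
    #include <mutex>
    #include <string>
    #include <vector>

    // Sketch: 'path' and 'slave_paths' mirror DBMetaOptions::path_ / slave_paths_.
    std::string PickIndexFilePath(const std::string& path, const std::vector<std::string>& slave_paths) {
        static uint64_t counter = 0;
        static std::mutex counter_mutex;
        uint64_t path_count = slave_paths.size() + 1;
        uint64_t index = 0;
        {
            std::lock_guard<std::mutex> lock(counter_mutex);
            index = counter++ % path_count;  // rotate over all configured paths
        }
        return (index == 0) ? path : slave_paths[index - 1];
    }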
@@ -79,9 +79,9 @@ GetMicroSecTimeStamp() { } Status -CreateTablePath(const DBMetaOptions& options, const std::string& table_id) { +CreateTablePath(const DBMetaOptions& options, const std::string& collection_id) { std::string db_path = options.path_; - std::string table_path = db_path + TABLES_FOLDER + table_id; + std::string table_path = db_path + TABLES_FOLDER + collection_id; auto status = server::CommonUtil::CreateDirectory(table_path); if (!status.ok()) { ENGINE_LOG_ERROR << status.message(); @@ -89,7 +89,7 @@ CreateTablePath(const DBMetaOptions& options, const std::string& table_id) { } for (auto& path : options.slave_paths_) { - table_path = path + TABLES_FOLDER + table_id; + table_path = path + TABLES_FOLDER + collection_id; status = server::CommonUtil::CreateDirectory(table_path); fiu_do_on("CreateTablePath.creat_slave_path", status = Status(DB_INVALID_PATH, "")); if (!status.ok()) { @@ -102,18 +102,18 @@ } Status -DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool force) { +DeleteTablePath(const DBMetaOptions& options, const std::string& collection_id, bool force) { std::vector<std::string> paths = options.slave_paths_; paths.push_back(options.path_); for (auto& path : paths) { - std::string table_path = path + TABLES_FOLDER + table_id; + std::string table_path = path + TABLES_FOLDER + collection_id; if (force) { boost::filesystem::remove_all(table_path); - ENGINE_LOG_DEBUG << "Remove table folder: " << table_path; + ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path; } else if (boost::filesystem::exists(table_path) && boost::filesystem::is_empty(table_path)) { boost::filesystem::remove_all(table_path); - ENGINE_LOG_DEBUG << "Remove table folder: " << table_path; + ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path; } } @@ -122,7 +122,7 @@ config.GetStorageConfigS3Enable(s3_enable); if (s3_enable) { - std::string table_path = options.path_ + TABLES_FOLDER + table_id; + std::string table_path = options.path_ + TABLES_FOLDER + collection_id; auto& storage_inst = milvus::storage::S3ClientWrapper::GetInstance(); Status stat = storage_inst.DeleteObjects(table_path); @@ -135,7 +135,7 @@ } Status -CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) { +CreateTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) { std::string parent_path = GetTableFileParentFolder(options, table_file); auto status = server::CommonUtil::CreateDirectory(parent_path); @@ -151,7 +151,7 @@ } Status -GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) { +GetTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) { std::string parent_path = ConstructParentFolder(options.path_, table_file); std::string file_path = parent_path + "/" + table_file.file_id_; @@ -179,23 +179,23 @@ } } - std::string msg = "Table file doesn't exist: " + file_path; + std::string msg = "Collection file doesn't exist: " + file_path; if (table_file.file_size_ > 0) { // no need to pop error for empty file - ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for table: " << table_file.table_id_; + ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for collection: " << table_file.collection_id_; } return Status(DB_ERROR, msg); } Status -DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) { +DeleteTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) { utils::GetTableFilePath(options, table_file); boost::filesystem::remove(table_file.location_); return Status::OK(); } Status -DeleteSegment(const DBMetaOptions& options, meta::TableFileSchema& table_file) { +DeleteSegment(const DBMetaOptions& options, meta::SegmentSchema& table_file) { utils::GetTableFilePath(options, table_file); std::string segment_dir; GetParentPath(table_file.location_, segment_dir); diff --git a/core/src/db/Utils.h b/core/src/db/Utils.h index c78b4fd717..4702ede7c4 100644 --- a/core/src/db/Utils.h +++ b/core/src/db/Utils.h @@ -26,18 +26,18 @@ int64_t GetMicroSecTimeStamp(); Status -CreateTablePath(const DBMetaOptions& options, const std::string& table_id); +CreateTablePath(const DBMetaOptions& options, const std::string& collection_id); Status -DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool force = true); +DeleteTablePath(const DBMetaOptions& options, const std::string& collection_id, bool force = true); Status -CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file); +CreateTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file); Status -GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file); +GetTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file); Status -DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file); +DeleteTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file); Status -DeleteSegment(const DBMetaOptions& options, meta::TableFileSchema& table_file); +DeleteSegment(const DBMetaOptions& options, meta::SegmentSchema& table_file); Status GetParentPath(const std::string& path, std::string& parent_path); diff --git
a/core/src/db/insert/MemManager.h b/core/src/db/insert/MemManager.h index a644a979cd..77a3ffbf37 100644 --- a/core/src/db/insert/MemManager.h +++ b/core/src/db/insert/MemManager.h @@ -24,21 +24,21 @@ namespace engine { class MemManager { public: virtual Status - InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim, + InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim, const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) = 0; virtual Status - InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim, + InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim, const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) = 0; virtual Status - DeleteVector(const std::string& table_id, IDNumber vector_id, uint64_t lsn) = 0; + DeleteVector(const std::string& collection_id, IDNumber vector_id, uint64_t lsn) = 0; virtual Status - DeleteVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) = 0; + DeleteVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) = 0; virtual Status - Flush(const std::string& table_id, bool apply_delete = true) = 0; + Flush(const std::string& collection_id, bool apply_delete = true) = 0; virtual Status Flush(std::set<std::string>& table_ids, bool apply_delete = true) = 0; @@ -47,7 +47,7 @@ class MemManager { // Serialize(std::set<std::string>& table_ids) = 0; virtual Status - EraseMemVector(const std::string& table_id) = 0; + EraseMemVector(const std::string& collection_id) = 0; virtual size_t GetCurrentMutableMem() = 0; diff --git a/core/src/db/insert/MemManagerImpl.cpp b/core/src/db/insert/MemManagerImpl.cpp index d55ec54b3c..cef2d9803e 100644 --- a/core/src/db/insert/MemManagerImpl.cpp +++ b/core/src/db/insert/MemManagerImpl.cpp @@ -21,18 +21,18 @@ namespace milvus { namespace engine { MemTablePtr -MemManagerImpl::GetMemByTable(const std::string& table_id) { - auto memIt = mem_id_map_.find(table_id); +MemManagerImpl::GetMemByTable(const std::string& collection_id) { + auto memIt = mem_id_map_.find(collection_id); if (memIt != mem_id_map_.end()) { return memIt->second; } - mem_id_map_[table_id] = std::make_shared<MemTable>(table_id, meta_, options_); - return mem_id_map_[table_id]; + mem_id_map_[collection_id] = std::make_shared<MemTable>(collection_id, meta_, options_); + return mem_id_map_[collection_id]; } Status -MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim, +MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim, const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) { flushed_tables.clear(); if (GetCurrentMem() > options_.insert_buffer_size_) { @@ -54,11 +54,11 @@ std::unique_lock<std::mutex> lock(mutex_); - return InsertVectorsNoLock(table_id, source, lsn); + return InsertVectorsNoLock(collection_id, source, lsn); } Status -MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim, +MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim, const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) { flushed_tables.clear(); if (GetCurrentMem() > options_.insert_buffer_size_) { @@ -80,12 +80,12 @@
MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const std::unique_lock<std::mutex> lock(mutex_); - return InsertVectorsNoLock(table_id, source, lsn); + return InsertVectorsNoLock(collection_id, source, lsn); } Status -MemManagerImpl::InsertVectorsNoLock(const std::string& table_id, const VectorSourcePtr& source, uint64_t lsn) { - MemTablePtr mem = GetMemByTable(table_id); +MemManagerImpl::InsertVectorsNoLock(const std::string& collection_id, const VectorSourcePtr& source, uint64_t lsn) { + MemTablePtr mem = GetMemByTable(collection_id); mem->SetLSN(lsn); auto status = mem->Add(source); @@ -93,18 +93,19 @@ MemManagerImpl::InsertVectorsNoLock(const std::string& table_id, const VectorSou } Status -MemManagerImpl::DeleteVector(const std::string& table_id, IDNumber vector_id, uint64_t lsn) { +MemManagerImpl::DeleteVector(const std::string& collection_id, IDNumber vector_id, uint64_t lsn) { std::unique_lock<std::mutex> lock(mutex_); - MemTablePtr mem = GetMemByTable(table_id); + MemTablePtr mem = GetMemByTable(collection_id); mem->SetLSN(lsn); auto status = mem->Delete(vector_id); return status; } Status -MemManagerImpl::DeleteVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) { +MemManagerImpl::DeleteVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, + uint64_t lsn) { std::unique_lock<std::mutex> lock(mutex_); - MemTablePtr mem = GetMemByTable(table_id); + MemTablePtr mem = GetMemByTable(collection_id); mem->SetLSN(lsn); IDNumbers ids; @@ -128,8 +129,8 @@ MemManagerImpl::DeleteVectors(const std::string& table_id, int64_t length, const } Status -MemManagerImpl::Flush(const std::string& table_id, bool apply_delete) { - ToImmutable(table_id); +MemManagerImpl::Flush(const std::string& collection_id, bool apply_delete) { + ToImmutable(collection_id); // TODO: There is actually only one memTable in the immutable list MemList temp_immutable_list; { @@ -140,13 +141,13 @@ std::unique_lock<std::mutex> lock(serialization_mtx_); auto max_lsn = GetMaxLSN(temp_immutable_list); for (auto& mem : temp_immutable_list) { - ENGINE_LOG_DEBUG << "Flushing table: " << mem->GetTableId(); + ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId(); auto status = mem->Serialize(max_lsn, apply_delete); if (!status.ok()) { - ENGINE_LOG_ERROR << "Flush table " << mem->GetTableId() << " failed"; + ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed"; return status; } - ENGINE_LOG_DEBUG << "Flushed table: " << mem->GetTableId(); + ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId(); } return Status::OK(); @@ -166,14 +167,14 @@ MemManagerImpl::Flush(std::set<std::string>& table_ids, bool apply_delete) { table_ids.clear(); auto max_lsn = GetMaxLSN(temp_immutable_list); for (auto& mem : temp_immutable_list) { - ENGINE_LOG_DEBUG << "Flushing table: " << mem->GetTableId(); + ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId(); auto status = mem->Serialize(max_lsn, apply_delete); if (!status.ok()) { - ENGINE_LOG_ERROR << "Flush table " << mem->GetTableId() << " failed"; + ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed"; return status; } table_ids.insert(mem->GetTableId()); - ENGINE_LOG_DEBUG << "Flushed table: " << mem->GetTableId(); + ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId(); } meta_->SetGlobalLastLSN(max_lsn); @@ -182,15 +183,15 @@ } Status
-MemManagerImpl::ToImmutable(const std::string& table_id) { +MemManagerImpl::ToImmutable(const std::string& collection_id) { std::unique_lock<std::mutex> lock(mutex_); - auto memIt = mem_id_map_.find(table_id); + auto memIt = mem_id_map_.find(collection_id); if (memIt != mem_id_map_.end()) { if (!memIt->second->Empty()) { immu_mem_list_.push_back(memIt->second); mem_id_map_.erase(memIt); } - // std::string err_msg = "Could not find table = " + table_id + " to flush"; + // std::string err_msg = "Could not find collection = " + collection_id + " to flush"; // ENGINE_LOG_ERROR << err_msg; // return Status(DB_NOT_FOUND, err_msg); } @@ -204,7 +205,7 @@ MemManagerImpl::ToImmutable() { MemIdMap temp_map; for (auto& kv : mem_id_map_) { if (kv.second->Empty()) { - // empty table without any deletes, no need to serialize + // empty collection without any deletes, no need to serialize temp_map.insert(kv); } else { immu_mem_list_.push_back(kv.second); @@ -216,17 +217,17 @@ } Status -MemManagerImpl::EraseMemVector(const std::string& table_id) { +MemManagerImpl::EraseMemVector(const std::string& collection_id) { { // erase MemVector from rapid-insert cache std::unique_lock<std::mutex> lock(mutex_); - mem_id_map_.erase(table_id); + mem_id_map_.erase(collection_id); } { // erase MemVector from serialize cache std::unique_lock<std::mutex> lock(serialization_mtx_); MemList temp_list; for (auto& mem : immu_mem_list_) { - if (mem->GetTableId() != table_id) { + if (mem->GetTableId() != collection_id) { temp_list.push_back(mem); } } @@ -265,9 +266,9 @@ MemManagerImpl::GetCurrentMem() { uint64_t MemManagerImpl::GetMaxLSN(const MemList& tables) { uint64_t max_lsn = 0; - for (auto& table : tables) { - auto cur_lsn = table->GetLSN(); - if (table->GetLSN() > max_lsn) { + for (auto& collection : tables) { + auto cur_lsn = collection->GetLSN(); + if (collection->GetLSN() > max_lsn) { max_lsn = cur_lsn; } } diff --git a/core/src/db/insert/MemManagerImpl.h b/core/src/db/insert/MemManagerImpl.h index 22a9267920..e729fadbf0 100644 --- a/core/src/db/insert/MemManagerImpl.h +++ b/core/src/db/insert/MemManagerImpl.h @@ -41,21 +41,21 @@ class MemManagerImpl : public MemManager, public server::CacheConfigHandler { } Status - InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim, + InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim, const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) override; Status - InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim, + InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim, const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) override; Status - DeleteVector(const std::string& table_id, IDNumber vector_id, uint64_t lsn) override; + DeleteVector(const std::string& collection_id, IDNumber vector_id, uint64_t lsn) override; Status - DeleteVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) override; + DeleteVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) override; Status - Flush(const std::string& table_id, bool apply_delete = true) override; + Flush(const std::string& collection_id, bool apply_delete = true) override; Status Flush(std::set<std::string>& table_ids, bool apply_delete = true) override; @@ -64,7 +64,7 @@ class MemManagerImpl : public MemManager, public server::CacheConfigHandler { //
Serialize(std::set<std::string>& table_ids) override; Status - EraseMemVector(const std::string& table_id) override; + EraseMemVector(const std::string& collection_id) override; size_t GetCurrentMutableMem() override; @@ -81,16 +81,16 @@ class MemManagerImpl : public MemManager, public server::CacheConfigHandler { private: MemTablePtr - GetMemByTable(const std::string& table_id); + GetMemByTable(const std::string& collection_id); Status - InsertVectorsNoLock(const std::string& table_id, const VectorSourcePtr& source, uint64_t lsn); + InsertVectorsNoLock(const std::string& collection_id, const VectorSourcePtr& source, uint64_t lsn); Status ToImmutable(); Status - ToImmutable(const std::string& table_id); + ToImmutable(const std::string& collection_id); uint64_t GetMaxLSN(const MemList& tables); diff --git a/core/src/db/insert/MemTable.cpp b/core/src/db/insert/MemTable.cpp index fce8d62e5b..821e1b3bc1 100644 --- a/core/src/db/insert/MemTable.cpp +++ b/core/src/db/insert/MemTable.cpp @@ -26,8 +26,8 @@ namespace milvus { namespace engine { -MemTable::MemTable(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options) - : table_id_(table_id), meta_(meta), options_(options) { +MemTable::MemTable(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options) - : collection_id_(collection_id), meta_(meta), options_(options) { SetIdentity("MemTable"); AddCacheInsertDataListener(); } @@ -42,7 +42,7 @@ MemTable::Add(const VectorSourcePtr& source) { Status status; if (mem_table_file_list_.empty() || current_mem_table_file->IsFull()) { - MemTableFilePtr new_mem_table_file = std::make_shared<MemTableFile>(table_id_, meta_, options_); + MemTableFilePtr new_mem_table_file = std::make_shared<MemTableFile>(collection_id_, meta_, options_); status = new_mem_table_file->Add(source); if (status.ok()) { mem_table_file_list_.emplace_back(new_mem_table_file); @@ -62,7 +62,7 @@ MemTable::Add(const VectorSourcePtr& source) { Status MemTable::Delete(segment::doc_id_t doc_id) { - // Locate which table file the doc id lands in + // Locate which collection file the doc id lands in for (auto& table_file : mem_table_file_list_) { table_file->Delete(doc_id); } @@ -74,7 +74,7 @@ MemTable::Delete(segment::doc_id_t doc_id) { Status MemTable::Delete(const std::vector<segment::doc_id_t>& doc_ids) { - // Locate which table file the doc id lands in + // Locate which collection file the doc id lands in for (auto& table_file : mem_table_file_list_) { table_file->Delete(doc_ids); } @@ -122,7 +122,7 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) { } // Update flush lsn - auto status = meta_->UpdateTableFlushLSN(table_id_, wal_lsn); + auto status = meta_->UpdateTableFlushLSN(collection_id_, wal_lsn); if (!status.ok()) { std::string err_msg = "Failed to write flush lsn to meta: " + status.ToString(); ENGINE_LOG_ERROR << err_msg; @@ -131,7 +131,7 @@ auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff = end - start; - ENGINE_LOG_DEBUG << "Finished flushing for table " << table_id_ << " in " << diff.count() << " s"; + ENGINE_LOG_DEBUG << "Finished flushing for collection " << collection_id_ << " in " << diff.count() << " s"; return Status::OK(); } @@ -143,7 +143,7 @@ MemTable::Empty() { const std::string& MemTable::GetTableId() const { - return table_id_; + return collection_id_; } size_t @@ -159,7 +159,7 @@ MemTable::GetCurrentMem() { Status MemTable::ApplyDeletes() { // Applying deletes to other segments on disk and their corresponding cache: - //
For each segment in table: + // For each segment in collection: // Load its bloom filter // For each id in delete list: // If present, add the uid to segment's uid list @@ -173,16 +173,16 @@ // Serialize segment's deletedDoc TODO(zhiru): append directly to previous file for now, may have duplicates // Serialize bloom filter - ENGINE_LOG_DEBUG << "Applying " << doc_ids_to_delete_.size() << " deletes in table: " << table_id_; + ENGINE_LOG_DEBUG << "Applying " << doc_ids_to_delete_.size() << " deletes in collection: " << collection_id_; auto start_total = std::chrono::high_resolution_clock::now(); // auto start = std::chrono::high_resolution_clock::now(); - std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX, - meta::TableFileSchema::FILE_TYPE::BACKUP}; - meta::TableFilesSchema table_files; - auto status = meta_->FilesByType(table_id_, file_types, table_files); + std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX, + meta::SegmentSchema::FILE_TYPE::BACKUP}; + meta::SegmentsSchema table_files; + auto status = meta_->FilesByType(collection_id_, file_types, table_files); if (!status.ok()) { std::string err_msg = "Failed to apply deletes: " + status.ToString(); ENGINE_LOG_ERROR << err_msg; @@ -209,7 +209,7 @@ } } - meta::TableFilesSchema files_to_check; + meta::SegmentsSchema files_to_check; for (auto& kv : ids_to_check_map) { files_to_check.emplace_back(table_files[kv.first]); } @@ -222,7 +222,7 @@ std::chrono::duration<double> diff0 = time0 - start_total; ENGINE_LOG_DEBUG << "Found " << ids_to_check_map.size() << " segment to apply deletes in " << diff0.count() << " s"; - meta::TableFilesSchema table_files_to_update; + meta::SegmentsSchema table_files_to_update; for (auto& kv : ids_to_check_map) { auto& table_file = table_files[kv.first]; @@ -235,7 +235,7 @@ segment::SegmentReader segment_reader(segment_dir); auto& segment_id = table_file.segment_id_; - meta::TableFilesSchema segment_files; + meta::SegmentsSchema segment_files; status = meta_->GetTableFilesBySegmentId(segment_id, segment_files); if (!status.ok()) { break; @@ -351,10 +351,10 @@ ENGINE_LOG_DEBUG << "Updated bloom filter in segment: " << table_file.segment_id_ << " in " << diff5.count() << " s"; - // Update table file row count + // Update collection file row count for (auto& file : segment_files) { - if (file.file_type_ == meta::TableFileSchema::RAW || file.file_type_ == meta::TableFileSchema::TO_INDEX || - file.file_type_ == meta::TableFileSchema::INDEX || file.file_type_ == meta::TableFileSchema::BACKUP) { + if (file.file_type_ == meta::SegmentSchema::RAW || file.file_type_ == meta::SegmentSchema::TO_INDEX || + file.file_type_ == meta::SegmentSchema::INDEX || file.file_type_ == meta::SegmentSchema::BACKUP) { file.row_count_ -= delete_count; table_files_to_update.emplace_back(file); } @@ -362,8 +362,8 @@ auto time7 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff6 = time7 - time6; diff6 = time6 - time5; - ENGINE_LOG_DEBUG << "Update table file row count in vector of segment: " << table_file.segment_id_ << " in " - << diff6.count() << " s"; + ENGINE_LOG_DEBUG << "Update collection file row count in vector of segment: " << table_file.segment_id_ + << " in " << diff6.count() << " s"; } auto time7 = std::chrono::high_resolution_clock::now(); @@ -380,9 +380,9
@@ MemTable::ApplyDeletes() { auto end_total = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff7 = end_total - time7; - ENGINE_LOG_DEBUG << "Update deletes to meta in table " << table_id_ << " in " << diff7.count() << " s"; + ENGINE_LOG_DEBUG << "Update deletes to meta in collection " << collection_id_ << " in " << diff7.count() << " s"; std::chrono::duration<double> diff_total = end_total - start_total; - ENGINE_LOG_DEBUG << "Finished applying deletes in table " << table_id_ << " in " << diff_total.count() << " s"; + ENGINE_LOG_DEBUG << "Finished applying deletes in collection " << collection_id_ << " in " << diff_total.count() << " s"; OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_check);
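The ApplyDeletes sequence above (load each segment's bloom filter, use it to rule segments out cheaply, then confirm against the segment's real ids and record the deletes) reduces to the following sketch. BloomFilter here is a hypothetical stand-in, not the actual segment reader API; Check may report false positives but never false negatives, which is why the uid set is consulted before recording a delete:

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Hypothetical bloom-filter interface for this sketch.
    struct BloomFilter {
        bool Check(int64_t) const { return true; }  // stand-in: always "maybe present"
    };

    struct Segment {
        BloomFilter bloom_filter;
        std::unordered_set<int64_t> uids;  // actual ids stored in the segment
        std::vector<int64_t> deleted;      // ids confirmed deleted from this segment
    };

    // Skip segments the bloom filter rules out; confirm the rest against the
    // real uid set, then record each hit for the segment's deletedDoc.
    void ApplyDeletes(std::vector<Segment>& segments, const std::vector<int64_t>& delete_ids) {
        for (auto& segment : segments) {
            for (int64_t id : delete_ids) {
                if (segment.bloom_filter.Check(id) && segment.uids.erase(id) > 0) {
                    segment.deleted.push_back(id);
                }
            }
        }
    }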
diff --git a/core/src/db/insert/MemTable.h b/core/src/db/insert/MemTable.h index c954b253ba..f5e2f89024 100644 --- a/core/src/db/insert/MemTable.h +++ b/core/src/db/insert/MemTable.h @@ -30,7 +30,7 @@ class MemTable : public server::CacheConfigHandler { public: using MemTableFileList = std::vector<MemTableFilePtr>; - MemTable(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options); + MemTable(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options); Status Add(const VectorSourcePtr& source); @@ -74,7 +74,7 @@ class MemTable : public server::CacheConfigHandler { ApplyDeletes(); private: - const std::string table_id_; + const std::string collection_id_; MemTableFileList mem_table_file_list_; diff --git a/core/src/db/insert/MemTableFile.cpp b/core/src/db/insert/MemTableFile.cpp index 96a6af9299..11c0aa2ba3 100644 --- a/core/src/db/insert/MemTableFile.cpp +++ b/core/src/db/insert/MemTableFile.cpp @@ -28,8 +28,8 @@ namespace milvus { namespace engine { -MemTableFile::MemTableFile(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options) - : table_id_(table_id), meta_(meta), options_(options) { +MemTableFile::MemTableFile(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options) + : collection_id_(collection_id), meta_(meta), options_(options) { current_mem_ = 0; auto status = CreateTableFile(); if (status.ok()) { @@ -47,8 +47,8 @@ MemTableFile::MemTableFile(const std::string& table_id, const meta::MetaPtr& met Status MemTableFile::CreateTableFile() { - meta::TableFileSchema table_file_schema; - table_file_schema.table_id_ = table_id_; + meta::SegmentSchema table_file_schema; + table_file_schema.collection_id_ = collection_id_; auto status = meta_->CreateTableFile(table_file_schema); if (status.ok()) { table_file_schema_ = table_file_schema; @@ -64,9 +64,9 @@ MemTableFile::Add(const VectorSourcePtr& source) { if (table_file_schema_.dimension_ <= 0) { std::string err_msg = "MemTableFile::Add: table_file_schema dimension = " + std::to_string(table_file_schema_.dimension_) + - ", table_id = " + table_file_schema_.table_id_; + ", collection_id = " + table_file_schema_.collection_id_; ENGINE_LOG_ERROR << err_msg; - return Status(DB_ERROR, "Not able to create table file"); + return Status(DB_ERROR, "Not able to create collection file"); } size_t single_vector_mem_size = source->SingleVectorSize(table_file_schema_.dimension_); @@ -162,11 +162,11 @@ MemTableFile::Serialize(uint64_t wal_lsn) { if (!status.ok()) { ENGINE_LOG_ERROR << "Failed to serialize segment: " << table_file_schema_.segment_id_; - /* Can't mark it as to_delete because data is stored in this mem table file. Any further flush - * will try to serialize the same mem table file and it won't be able to find the directory - * to write to or update the associated table file in meta. + /* Can't mark it as to_delete because data is stored in this mem collection file. Any further flush + * will try to serialize the same mem collection file and it won't be able to find the directory + * to write to or update the associated collection file in meta. * - table_file_schema_.file_type_ = meta::TableFileSchema::TO_DELETE; + table_file_schema_.file_type_ = meta::SegmentSchema::TO_DELETE; meta_->UpdateTableFile(table_file_schema_); ENGINE_LOG_DEBUG << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_ << " to to_delete"; @@ -186,19 +186,19 @@ // else set file type to RAW, no need to build index if (table_file_schema_.engine_type_ != (int)EngineType::FAISS_IDMAP && table_file_schema_.engine_type_ != (int)EngineType::FAISS_BIN_IDMAP) { - table_file_schema_.file_type_ = (size >= table_file_schema_.index_file_size_) ? meta::TableFileSchema::TO_INDEX - : meta::TableFileSchema::RAW; + table_file_schema_.file_type_ = + (size >= table_file_schema_.index_file_size_) ? meta::SegmentSchema::TO_INDEX : meta::SegmentSchema::RAW; } else { - table_file_schema_.file_type_ = meta::TableFileSchema::RAW; + table_file_schema_.file_type_ = meta::SegmentSchema::RAW; } - // Set table file's flush_lsn so WAL can roll back and delete garbage files which can be obtained from + // Set collection file's flush_lsn so WAL can roll back and delete garbage files which can be obtained from // GetTableFilesByFlushLSN() in meta. table_file_schema_.flush_lsn_ = wal_lsn; status = meta_->UpdateTableFile(table_file_schema_); - ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index") + ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ?
"raw" : "to_index") << " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn; // TODO(zhiru): cache diff --git a/core/src/db/insert/MemTableFile.h b/core/src/db/insert/MemTableFile.h index 9cc636dc9f..8168bdc92e 100644 --- a/core/src/db/insert/MemTableFile.h +++ b/core/src/db/insert/MemTableFile.h @@ -28,7 +28,7 @@ namespace engine { class MemTableFile : public server::CacheConfigHandler { public: - MemTableFile(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options); + MemTableFile(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options); ~MemTableFile() = default; @@ -66,8 +66,8 @@ class MemTableFile : public server::CacheConfigHandler { CreateTableFile(); private: - const std::string table_id_; - meta::TableFileSchema table_file_schema_; + const std::string collection_id_; + meta::SegmentSchema table_file_schema_; meta::MetaPtr meta_; DBOptions options_; size_t current_mem_; diff --git a/core/src/db/insert/VectorSource.cpp b/core/src/db/insert/VectorSource.cpp index 099eeed458..0af40271ce 100644 --- a/core/src/db/insert/VectorSource.cpp +++ b/core/src/db/insert/VectorSource.cpp @@ -28,7 +28,7 @@ VectorSource::VectorSource(VectorsData vectors) : vectors_(std::move(vectors)) { Status VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment::SegmentWriterPtr& segment_writer_ptr, - const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, + const meta::SegmentSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added) { uint64_t n = vectors_.vector_count_; server::CollectAddMetrics metrics(n, table_file_schema.dimension_); diff --git a/core/src/db/insert/VectorSource.h b/core/src/db/insert/VectorSource.h index b92a87e3ff..70bc4e43ca 100644 --- a/core/src/db/insert/VectorSource.h +++ b/core/src/db/insert/VectorSource.h @@ -30,7 +30,7 @@ class VectorSource { Status Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment::SegmentWriterPtr& segment_writer_ptr, - const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added); + const meta::SegmentSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added); size_t GetNumVectorsAdded(); diff --git a/core/src/db/meta/Meta.h b/core/src/db/meta/Meta.h index 11c9e75b4f..1b76b8724c 100644 --- a/core/src/db/meta/Meta.h +++ b/core/src/db/meta/Meta.h @@ -35,7 +35,7 @@ class Meta { class CleanUpFilter { public: virtual bool - IsIgnored(const TableFileSchema& schema) = 0; + IsIgnored(const SegmentSchema& schema) = 0; }; */ @@ -43,92 +43,92 @@ class Meta { virtual ~Meta() = default; virtual Status - CreateTable(TableSchema& table_schema) = 0; + CreateTable(CollectionSchema& table_schema) = 0; virtual Status - DescribeTable(TableSchema& table_schema) = 0; + DescribeTable(CollectionSchema& table_schema) = 0; virtual Status - HasTable(const std::string& table_id, bool& has_or_not) = 0; + HasTable(const std::string& collection_id, bool& has_or_not) = 0; virtual Status - AllTables(std::vector& table_schema_array) = 0; + AllTables(std::vector& table_schema_array) = 0; virtual Status - UpdateTableFlag(const std::string& table_id, int64_t flag) = 0; + UpdateTableFlag(const std::string& collection_id, int64_t flag) = 0; virtual Status - UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) = 0; + UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) = 0; virtual Status - 
-    GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) = 0;
+    GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) = 0;

     virtual Status
-    GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) = 0;
+    GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) = 0;

     virtual Status
-    DropTable(const std::string& table_id) = 0;
+    DropTable(const std::string& collection_id) = 0;

     virtual Status
-    DeleteTableFiles(const std::string& table_id) = 0;
+    DeleteTableFiles(const std::string& collection_id) = 0;

     virtual Status
-    CreateTableFile(TableFileSchema& file_schema) = 0;
+    CreateTableFile(SegmentSchema& file_schema) = 0;

     virtual Status
-    GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) = 0;
+    GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids, SegmentsSchema& table_files) = 0;

     virtual Status
-    GetTableFilesBySegmentId(const std::string& segment_id, TableFilesSchema& table_files) = 0;
+    GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) = 0;

     virtual Status
-    UpdateTableFile(TableFileSchema& file_schema) = 0;
+    UpdateTableFile(SegmentSchema& file_schema) = 0;

     virtual Status
-    UpdateTableFiles(TableFilesSchema& files) = 0;
+    UpdateTableFiles(SegmentsSchema& files) = 0;

     virtual Status
-    UpdateTableFilesRowCount(TableFilesSchema& files) = 0;
+    UpdateTableFilesRowCount(SegmentsSchema& files) = 0;

     virtual Status
-    UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;
+    UpdateTableIndex(const std::string& collection_id, const TableIndex& index) = 0;

     virtual Status
-    UpdateTableFilesToIndex(const std::string& table_id) = 0;
+    UpdateTableFilesToIndex(const std::string& collection_id) = 0;

     virtual Status
-    DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;
+    DescribeTableIndex(const std::string& collection_id, TableIndex& index) = 0;

     virtual Status
-    DropTableIndex(const std::string& table_id) = 0;
+    DropTableIndex(const std::string& collection_id) = 0;

     virtual Status
-    CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag,
+    CreatePartition(const std::string& collection_name, const std::string& partition_name, const std::string& tag,
                     uint64_t lsn) = 0;

     virtual Status
     DropPartition(const std::string& partition_name) = 0;

     virtual Status
-    ShowPartitions(const std::string& table_name, std::vector<TableSchema>& partition_schema_array) = 0;
+    ShowPartitions(const std::string& collection_name, std::vector<CollectionSchema>& partition_schema_array) = 0;

     virtual Status
-    GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0;
+    GetPartitionName(const std::string& collection_name, const std::string& tag, std::string& partition_name) = 0;

     virtual Status
-    FilesToSearch(const std::string& table_id, TableFilesSchema& files) = 0;
+    FilesToSearch(const std::string& collection_id, SegmentsSchema& files) = 0;

     virtual Status
-    FilesToMerge(const std::string& table_id, TableFilesSchema& files) = 0;
+    FilesToMerge(const std::string& collection_id, SegmentsSchema& files) = 0;

     virtual Status
-    FilesToIndex(TableFilesSchema&) = 0;
+    FilesToIndex(SegmentsSchema&) = 0;

     virtual Status
-    FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) = 0;
+    FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) = 0;

     virtual Status
-    FilesByID(const std::vector<size_t>& ids, TableFilesSchema& files) = 0;
+    FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files) = 0;

     virtual Status
     Size(uint64_t& result) = 0;
@@ -146,7 +146,7 @@ class Meta {
     DropAll() = 0;

     virtual Status
-    Count(const std::string& table_id, uint64_t& result) = 0;
+    Count(const std::string& collection_id, uint64_t& result) = 0;

     virtual Status
     SetGlobalLastLSN(uint64_t lsn) = 0;
diff --git a/core/src/db/meta/MetaTypes.h b/core/src/db/meta/MetaTypes.h
index 611df80d98..8cf3cd92b7 100644
--- a/core/src/db/meta/MetaTypes.h
+++ b/core/src/db/meta/MetaTypes.h
@@ -40,14 +40,14 @@ struct EnvironmentSchema {
     uint64_t global_lsn_ = 0;
 };  // EnvironmentSchema

-struct TableSchema {
+struct CollectionSchema {
     typedef enum {
         NORMAL,
         TO_DELETE,
     } TABLE_STATE;

     size_t id_ = 0;
-    std::string table_id_;
+    std::string collection_id_;
     int32_t state_ = (int)NORMAL;
     uint16_t dimension_ = 0;
     int64_t created_on_ = 0;
@@ -60,9 +60,9 @@
     std::string partition_tag_;
     std::string version_ = CURRENT_VERSION;
     uint64_t flush_lsn_ = 0;
-};  // TableSchema
+};  // CollectionSchema

-struct TableFileSchema {
+struct SegmentSchema {
     typedef enum {
         NEW,
         RAW,
@@ -75,7 +75,7 @@
     } FILE_TYPE;

     size_t id_ = 0;
-    std::string table_id_;
+    std::string collection_id_;
     std::string segment_id_;
     std::string file_id_;
     int32_t file_type_ = NEW;
@@ -92,10 +92,10 @@
     std::string index_params_;                   // not persist to meta
     int32_t metric_type_ = DEFAULT_METRIC_TYPE;  // not persist to meta
     uint64_t flush_lsn_ = 0;
-};  // TableFileSchema
+};  // SegmentSchema

-using TableFileSchemaPtr = std::shared_ptr<TableFileSchema>;
-using TableFilesSchema = std::vector<TableFileSchema>;
+using SegmentSchemaPtr = std::shared_ptr<SegmentSchema>;
+using SegmentsSchema = std::vector<SegmentSchema>;

 }  // namespace meta
 }  // namespace engine
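MetaTypes.h is the pivot of the whole rename: TableSchema/TableFileSchema become CollectionSchema/SegmentSchema and the vector alias TableFilesSchema becomes SegmentsSchema, while the persisted meta column names (table_id, etc.) stay unchanged. A trimmed-down, self-contained illustration of the renamed shapes as a caller now sees them, with made-up values (a sketch, not code from the tree):

    #include <memory>
    #include <string>
    #include <vector>

    // Local stand-ins for the renamed structs above (fields abbreviated).
    struct CollectionSchema {
        std::string collection_id_;  // was table_id_
        uint16_t dimension_ = 0;
        int64_t index_file_size_ = 0;
    };
    struct SegmentSchema {
        std::string collection_id_;  // was table_id_
        std::string segment_id_;
        std::string file_id_;
    };
    using SegmentSchemaPtr = std::shared_ptr<SegmentSchema>;  // was TableFileSchemaPtr
    using SegmentsSchema = std::vector<SegmentSchema>;        // was TableFilesSchema

    int main() {
        CollectionSchema demo;
        demo.collection_id_ = "demo_collection";
        demo.dimension_ = 128;
        SegmentsSchema files;  // one entry per segment file of the collection
        files.push_back(SegmentSchema{demo.collection_id_, "seg_1", "file_1"});
    }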
diff --git a/core/src/db/meta/MySQLMetaImpl.cpp b/core/src/db/meta/MySQLMetaImpl.cpp
index 696b7cd7c2..5c8e546e34 100644
--- a/core/src/db/meta/MySQLMetaImpl.cpp
+++ b/core/src/db/meta/MySQLMetaImpl.cpp
@@ -91,6 +91,7 @@ class MetaField {
 };

 using MetaFields = std::vector<MetaField>;
+
 class MetaSchema {
  public:
     MetaSchema(const std::string& name, const MetaFields& fields) : name_(name), fields_(fields) {
@@ -181,12 +182,12 @@ MySQLMetaImpl::~MySQLMetaImpl() {
 }

 Status
-MySQLMetaImpl::NextTableId(std::string& table_id) {
+MySQLMetaImpl::NextTableId(std::string& collection_id) {
     std::lock_guard<std::mutex> lock(genid_mutex_);  // avoid duplicated id
     std::stringstream ss;
     SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance();
     ss << id_generator.GetNextIDNumber();
-    table_id = ss.str();
+    collection_id = ss.str();
     return Status::OK();
 }

@@ -230,7 +231,7 @@ MySQLMetaImpl::ValidateMetaSchema() {
             exist_fields.push_back(MetaField(name, type, ""));
         }
     } catch (std::exception& e) {
-        ENGINE_LOG_DEBUG << "Meta table '" << schema.name() << "' not exist and will be created";
+        ENGINE_LOG_DEBUG << "Meta collection '" << schema.name() << "' not exist and will be created";
     }

     if (exist_fields.empty()) {
@@ -319,7 +320,7 @@ MySQLMetaImpl::Initialize() {
         throw Exception(DB_INVALID_META_URI, msg);
     }

-    // step 7: create meta table Tables
+    // step 7: create meta collection Tables
     mysqlpp::Query InitializeQuery = connectionPtr->query();

     InitializeQuery << "CREATE TABLE IF NOT EXISTS " << TABLES_SCHEMA.name() << " (" << TABLES_SCHEMA.ToString() + ");";
@@ -329,12 +330,12 @@ MySQLMetaImpl::Initialize() {
     bool initialize_query_exec = InitializeQuery.exec();
     fiu_do_on("MySQLMetaImpl.Initialize.fail_create_table_scheme", initialize_query_exec = false);
     if (!initialize_query_exec) {
-        std::string msg = "Failed to create meta table 'Tables' in MySQL";
+        std::string msg = "Failed to create meta collection 'Tables' in MySQL";
         ENGINE_LOG_ERROR << msg;
         throw Exception(DB_META_TRANSACTION_FAILED, msg);
     }

-    // step 8: create meta table TableFiles
+    // step 8: create meta collection TableFiles
     InitializeQuery << "CREATE TABLE IF NOT EXISTS " << TABLEFILES_SCHEMA.name() << " ("
                     << TABLEFILES_SCHEMA.ToString() + ");";

@@ -343,7 +344,7 @@ MySQLMetaImpl::Initialize() {
     initialize_query_exec = InitializeQuery.exec();
     fiu_do_on("MySQLMetaImpl.Initialize.fail_create_table_files", initialize_query_exec = false);
     if (!initialize_query_exec) {
-        std::string msg = "Failed to create meta table 'TableFiles' in MySQL";
+        std::string msg = "Failed to create meta collection 'TableFiles' in MySQL";
         ENGINE_LOG_ERROR << msg;
         throw Exception(DB_META_TRANSACTION_FAILED, msg);
     }
@@ -352,7 +353,7 @@ MySQLMetaImpl::Initialize() {
 }

 Status
-MySQLMetaImpl::CreateTable(TableSchema& table_schema) {
+MySQLMetaImpl::CreateTable(CollectionSchema& table_schema) {
     try {
         server::MetricCollector metric;
         {
@@ -367,11 +368,11 @@ MySQLMetaImpl::CreateTable(TableSchema& table_schema) {
             mysqlpp::Query createTableQuery = connectionPtr->query();

-            if (table_schema.table_id_.empty()) {
-                NextTableId(table_schema.table_id_);
+            if (table_schema.collection_id_.empty()) {
+                NextTableId(table_schema.collection_id_);
             } else {
                 createTableQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
-                                 << table_schema.table_id_ << ";";
+                                 << table_schema.collection_id_ << ";";

                 ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str();

@@ -379,11 +380,12 @@ MySQLMetaImpl::CreateTable(TableSchema& table_schema) {
                 if (res.num_rows() == 1) {
                     int state = res[0]["state"];
-                    fiu_do_on("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE", state = TableSchema::TO_DELETE);
-                    if (TableSchema::TO_DELETE == state) {
-                        return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second");
+                    fiu_do_on("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE", state = CollectionSchema::TO_DELETE);
+                    if (CollectionSchema::TO_DELETE == state) {
+                        return Status(DB_ERROR,
+                                      "Collection already exists and it is in delete state, please wait a second");
                     } else {
-                        return Status(DB_ALREADY_EXIST, "Table already exists");
+                        return Status(DB_ALREADY_EXIST, "Collection already exists");
                     }
                 }
             }
@@ -392,7 +394,7 @@ MySQLMetaImpl::CreateTable(TableSchema& table_schema) {
             table_schema.created_on_ = utils::GetMicroSecTimeStamp();

             std::string id = "NULL";  // auto-increment
-            std::string& table_id = table_schema.table_id_;
+            std::string& collection_id = table_schema.collection_id_;
             std::string state = std::to_string(table_schema.state_);
             std::string dimension = std::to_string(table_schema.dimension_);
             std::string created_on = std::to_string(table_schema.created_on_);
@@ -406,11 +408,12 @@ MySQLMetaImpl::CreateTable(TableSchema& table_schema) {
             std::string& version = table_schema.version_;
             std::string flush_lsn = std::to_string(table_schema.flush_lsn_);

-            createTableQuery << "INSERT INTO " << META_TABLES << " VALUES(" << id << ", " << mysqlpp::quote << table_id
-                             << ", " << state << ", " << dimension << ", " << created_on << ", " << flag << ", "
-                             << index_file_size << ", " << engine_type << ", " << mysqlpp::quote << index_params << ", "
-                             << metric_type << ", " << mysqlpp::quote << owner_table << ", " << mysqlpp::quote
-                             << partition_tag << ", " << mysqlpp::quote << version << ", " << flush_lsn << ");";
+            createTableQuery << "INSERT INTO " << META_TABLES << " VALUES(" << id << ", " << mysqlpp::quote
+                             << collection_id << ", " << state << ", " << dimension << ", " << created_on << ", "
+                             << flag << ", " << index_file_size << ", " << engine_type << ", " << mysqlpp::quote
+                             << index_params << ", " << metric_type << ", " << mysqlpp::quote << owner_table << ", "
+                             << mysqlpp::quote << partition_tag << ", " << mysqlpp::quote << version << ", "
+                             << flush_lsn << ");";
", " << flush_lsn << ");"; + createTableQuery << "INSERT INTO " << META_TABLES << " VALUES(" << id << ", " << mysqlpp::quote + << collection_id << ", " << state << ", " << dimension << ", " << created_on << ", " + << flag << ", " << index_file_size << ", " << engine_type << ", " << mysqlpp::quote + << index_params << ", " << metric_type << ", " << mysqlpp::quote << owner_table << ", " + << mysqlpp::quote << partition_tag << ", " << mysqlpp::quote << version << ", " + << flush_lsn << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); @@ -419,19 +422,19 @@ MySQLMetaImpl::CreateTable(TableSchema& table_schema) { // Consume all results to avoid "Commands out of sync" error } else { - return HandleException("Add Table Error", createTableQuery.error()); + return HandleException("Add Collection Error", createTableQuery.error()); } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; - return utils::CreateTablePath(options_, table_schema.table_id_); + ENGINE_LOG_DEBUG << "Successfully create collection: " << table_schema.collection_id_; + return utils::CreateTablePath(options_, table_schema.collection_id_); } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN CREATING TABLE", e.what()); } } Status -MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { +MySQLMetaImpl::DescribeTable(CollectionSchema& table_schema) { try { server::MetricCollector metric; mysqlpp::StoreQueryResult res; @@ -449,8 +452,8 @@ MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { describeTableQuery << "SELECT id, state, dimension, created_on, flag, index_file_size, engine_type, index_params" << " , metric_type ,owner_table, partition_tag, version, flush_lsn" - << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ - << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_schema.collection_id_ + << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTable: " << describeTableQuery.str(); @@ -473,7 +476,7 @@ MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { resRow["version"].to_string(table_schema.version_); table_schema.flush_lsn_ = resRow["flush_lsn"]; } else { - return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); + return Status(DB_NOT_FOUND, "Collection " + table_schema.collection_id_ + " not found"); } } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN DESCRIBING TABLE", e.what()); @@ -483,7 +486,7 @@ MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { } Status -MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { +MySQLMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) { try { server::MetricCollector metric; mysqlpp::StoreQueryResult res; @@ -498,10 +501,10 @@ MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { } mysqlpp::Query hasTableQuery = connectionPtr->query(); - // since table_id is a unique column we just need to check whether it exists or not + // since collection_id is a unique column we just need to check whether it exists or not hasTableQuery << "SELECT EXISTS" - << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id - << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ")" + << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " 
+                          << collection_id << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ")"
                           << " AS " << mysqlpp::quote << "check"
                           << ";";
@@ -520,7 +523,7 @@ MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) {
 }

 Status
-MySQLMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
+MySQLMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
     try {
         server::MetricCollector metric;
         mysqlpp::StoreQueryResult res;
@@ -537,8 +540,8 @@ MySQLMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
             mysqlpp::Query allTablesQuery = connectionPtr->query();
             allTablesQuery << "SELECT id, table_id, dimension, engine_type, index_params, index_file_size, metric_type"
                            << " ,owner_table, partition_tag, version, flush_lsn"
-                           << " FROM " << META_TABLES << " WHERE state <> " << std::to_string(TableSchema::TO_DELETE)
-                           << " AND owner_table = \"\";";
+                           << " FROM " << META_TABLES << " WHERE state <> "
+                           << std::to_string(CollectionSchema::TO_DELETE) << " AND owner_table = \"\";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allTablesQuery.str();

@@ -546,9 +549,9 @@ MySQLMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
         }  // Scoped Connection

         for (auto& resRow : res) {
-            TableSchema table_schema;
+            CollectionSchema table_schema;
             table_schema.id_ = resRow["id"];  // implicit conversion
-            resRow["table_id"].to_string(table_schema.table_id_);
+            resRow["table_id"].to_string(table_schema.collection_id_);
             table_schema.dimension_ = resRow["dimension"];
             table_schema.index_file_size_ = resRow["index_file_size"];
             table_schema.engine_type_ = resRow["engine_type"];
@@ -569,7 +572,7 @@ MySQLMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
 }

 Status
-MySQLMetaImpl::DropTable(const std::string& table_id) {
+MySQLMetaImpl::DropTable(const std::string& collection_id) {
     try {
         server::MetricCollector metric;
         {
@@ -583,11 +586,12 @@ MySQLMetaImpl::DropTable(const std::string& table_id) {
                 return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
             }

-            // soft delete table
+            // soft delete collection
             mysqlpp::Query deleteTableQuery = connectionPtr->query();
             //
-            deleteTableQuery << "UPDATE " << META_TABLES << " SET state = " << std::to_string(TableSchema::TO_DELETE)
-                             << " WHERE table_id = " << mysqlpp::quote << table_id << ";";
+            deleteTableQuery << "UPDATE " << META_TABLES
+                             << " SET state = " << std::to_string(CollectionSchema::TO_DELETE)
+                             << " WHERE table_id = " << mysqlpp::quote << collection_id << ";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTable: " << deleteTableQuery.str();

@@ -599,10 +603,10 @@ MySQLMetaImpl::DropTable(const std::string& table_id) {
         bool is_writable_mode{mode_ == DBOptions::MODE::CLUSTER_WRITABLE};
         fiu_do_on("MySQLMetaImpl.DropTable.CLUSTER_WRITABLE_MODE", is_writable_mode = true);
         if (is_writable_mode) {
-            DeleteTableFiles(table_id);
+            DeleteTableFiles(collection_id);
         }

-        ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id;
+        ENGINE_LOG_DEBUG << "Successfully delete collection, collection id = " << collection_id;
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN DELETING TABLE", e.what());
     }
@@ -611,7 +615,7 @@ MySQLMetaImpl::DropTable(const std::string& table_id) {
 }

 Status
-MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) {
+MySQLMetaImpl::DeleteTableFiles(const std::string& collection_id) {
     try {
         server::MetricCollector metric;
         {
@@ -625,14 +629,14 @@ MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) {
                 return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
             }

-            // soft delete table files
+            // soft delete collection files
             mysqlpp::Query deleteTableFilesQuery = connectionPtr->query();
             //
             deleteTableFilesQuery << "UPDATE " << META_TABLEFILES
-                                  << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE)
+                                  << " SET file_type = " << std::to_string(SegmentSchema::TO_DELETE)
                                   << " ,updated_time = " << std::to_string(utils::GetMicroSecTimeStamp())
-                                  << " WHERE table_id = " << mysqlpp::quote << table_id << " AND file_type <> "
-                                  << std::to_string(TableFileSchema::TO_DELETE) << ";";
+                                  << " WHERE table_id = " << mysqlpp::quote << collection_id << " AND file_type <> "
+                                  << std::to_string(SegmentSchema::TO_DELETE) << ";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTableFiles: " << deleteTableFilesQuery.str();

@@ -641,7 +645,7 @@ MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) {
             }
         }  // Scoped Connection

-        ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id;
+        ENGINE_LOG_DEBUG << "Successfully delete collection files, collection id = " << collection_id;
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN DELETING TABLE FILES", e.what());
     }
@@ -650,12 +654,12 @@ MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) {
 }

 Status
-MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) {
+MySQLMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
     if (file_schema.date_ == EmptyDate) {
         file_schema.date_ = utils::GetDate();
     }
-    TableSchema table_schema;
-    table_schema.table_id_ = file_schema.table_id_;
+    CollectionSchema table_schema;
+    table_schema.collection_id_ = file_schema.collection_id_;
     auto status = DescribeTable(table_schema);
     if (!status.ok()) {
         return status;
@@ -679,7 +683,7 @@ MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) {
         file_schema.metric_type_ = table_schema.metric_type_;

         std::string id = "NULL";  // auto-increment
-        std::string table_id = file_schema.table_id_;
+        std::string collection_id = file_schema.collection_id_;
         std::string segment_id = file_schema.segment_id_;
         std::string engine_type = std::to_string(file_schema.engine_type_);
         std::string file_id = file_schema.file_id_;
@@ -704,7 +708,7 @@ MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) {
             mysqlpp::Query createTableFileQuery = connectionPtr->query();

             createTableFileQuery << "INSERT INTO " << META_TABLEFILES << " VALUES(" << id << ", " << mysqlpp::quote
-                                 << table_id << ", " << mysqlpp::quote << segment_id << ", " << engine_type << ", "
+                                 << collection_id << ", " << mysqlpp::quote << segment_id << ", " << engine_type << ", "
                                  << mysqlpp::quote << file_id << ", " << file_type << ", " << file_size << ", "
                                  << row_count << ", " << updated_time << ", " << created_on << ", " << date << ", "
                                  << flush_lsn << ");";
@@ -720,7 +724,7 @@ MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) {
            }
         }  // Scoped Connection

-        ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_;
+        ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_;
         return utils::CreateTableFilePath(options_, file_schema);
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN CREATING TABLE FILE", e.what());
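Note the two-phase deletion pattern running through DropTable/DeleteTableFiles: rows are never removed directly, they are soft-deleted by flipping state/file_type to TO_DELETE and stamping updated_time, and a cleanup pass (CleanUpFilesWithTTL, later in this file) physically reaps rows once they have been soft-deleted for longer than a TTL. A compact in-memory sketch of the idea, with hypothetical names and an illustrative TO_DELETE value (not the mysqlpp code from this patch):

    #include <cstdint>
    #include <vector>

    struct FileRecord {
        int file_type = 0;         // illustrative; TO_DELETE below is a made-up value
        uint64_t updated_time = 0; // microseconds, as in the meta tables
    };
    constexpr int TO_DELETE = 4;

    // Phase 1: soft delete -- mark only, keep the row (what DeleteTableFiles does).
    void SoftDeleteAll(std::vector<FileRecord>& files, uint64_t now) {
        for (auto& f : files) {
            if (f.file_type != TO_DELETE) {
                f.file_type = TO_DELETE;
                f.updated_time = now;
            }
        }
    }

    // Phase 2: reap -- physically erase rows soft-deleted longer than ttl_us
    // (the role CleanUpFilesWithTTL plays against MySQL). Requires C++20 erase_if.
    void Reap(std::vector<FileRecord>& files, uint64_t now, uint64_t ttl_us) {
        std::erase_if(files, [&](const FileRecord& f) {
            return f.file_type == TO_DELETE && f.updated_time < now - ttl_us;
        });
    }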
@@ -728,8 +732,8 @@ MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) {
 }

 Status
-MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids,
-                             TableFilesSchema& table_files) {
+MySQLMetaImpl::GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids,
+                             SegmentsSchema& table_files) {
     if (ids.empty()) {
         return Status::OK();
     }
@@ -756,24 +760,24 @@ MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector
            mysqlpp::Query getTableFileQuery = connectionPtr->query();
             getTableFileQuery
                 << "SELECT id, segment_id, engine_type, file_id, file_type, file_size, row_count, date, created_on"
-                << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id << " AND ("
+                << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id << " AND ("
                 << idStr << ")"
-                << " AND file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";";
+                << " AND file_type <> " << std::to_string(SegmentSchema::TO_DELETE) << ";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFiles: " << getTableFileQuery.str();

             res = getTableFileQuery.store();
         }  // Scoped Connection

-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         DescribeTable(table_schema);

         Status ret;
         for (auto& resRow : res) {
-            TableFileSchema file_schema;
+            SegmentSchema file_schema;
             file_schema.id_ = resRow["id"];
-            file_schema.table_id_ = table_id;
+            file_schema.collection_id_ = collection_id;
             resRow["segment_id"].to_string(file_schema.segment_id_);
             file_schema.index_file_size_ = table_schema.index_file_size_;
             file_schema.engine_type_ = resRow["engine_type"];
@@ -791,7 +795,7 @@ MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector
-                              << " AND file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";";
+                              << " AND file_type <> " << std::to_string(SegmentSchema::TO_DELETE) << ";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFilesBySegmentId: " << getTableFileQuery.str();

@@ -822,17 +826,17 @@
         }  // Scoped Connection

         if (!res.empty()) {
-            TableSchema table_schema;
-            res[0]["table_id"].to_string(table_schema.table_id_);
+            CollectionSchema table_schema;
+            res[0]["table_id"].to_string(table_schema.collection_id_);
             auto status = DescribeTable(table_schema);
             if (!status.ok()) {
                 return status;
             }

             for (auto& resRow : res) {
-                TableFileSchema file_schema;
+                SegmentSchema file_schema;
                 file_schema.id_ = resRow["id"];
-                file_schema.table_id_ = table_schema.table_id_;
+                file_schema.collection_id_ = table_schema.collection_id_;
                 resRow["segment_id"].to_string(file_schema.segment_id_);
                 file_schema.index_file_size_ = table_schema.index_file_size_;
                 file_schema.engine_type_ = resRow["engine_type"];
@@ -851,7 +855,7 @@
             }
         }

-        ENGINE_LOG_DEBUG << "Get table files by segment id";
+        ENGINE_LOG_DEBUG << "Get collection files by segment id";
         return Status::OK();
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN RETRIEVING TABLE FILES BY SEGMENT ID", e.what());
@@ -859,7 +863,7 @@
 }

 Status
-MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) {
+MySQLMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIndex& index) {
     try {
         server::MetricCollector metric;

@@ -875,8 +879,9 @@
            mysqlpp::Query updateTableIndexParamQuery = connectionPtr->query();
             updateTableIndexParamQuery << "SELECT id, state, dimension, created_on"
-                                       << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id
-                                       << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";";
+                                       << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
WHERE table_id = " << mysqlpp::quote + << collection_id << " AND state <> " + << std::to_string(CollectionSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); @@ -895,7 +900,7 @@ MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& i << " ,engine_type = " << index.engine_type_ << " ,index_params = " << mysqlpp::quote << index.extra_params_.dump() << " ,metric_type = " << index.metric_type_ - << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + << " WHERE table_id = " << mysqlpp::quote << collection_id << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); @@ -904,11 +909,11 @@ MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& i updateTableIndexParamQuery.error()); } } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + return Status(DB_NOT_FOUND, "Collection " + collection_id + " not found"); } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully update collection index, collection id = " << collection_id; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN UPDATING TABLE INDEX PARAM", e.what()); } @@ -917,7 +922,7 @@ MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& i } Status -MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { +MySQLMetaImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) { try { server::MetricCollector metric; @@ -933,7 +938,7 @@ MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { mysqlpp::Query updateTableFlagQuery = connectionPtr->query(); updateTableFlagQuery << "UPDATE " << META_TABLES << " SET flag = " << flag - << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + << " WHERE table_id = " << mysqlpp::quote << collection_id << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlag: " << updateTableFlagQuery.str(); @@ -942,7 +947,7 @@ MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully update collection flag, collection id = " << collection_id; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); } @@ -951,7 +956,7 @@ MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { } Status -MySQLMetaImpl::UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) { +MySQLMetaImpl::UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) { try { server::MetricCollector metric; @@ -964,7 +969,7 @@ MySQLMetaImpl::UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_l mysqlpp::Query updateTableFlagQuery = connectionPtr->query(); updateTableFlagQuery << "UPDATE " << META_TABLES << " SET flush_lsn = " << flush_lsn - << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + << " WHERE table_id = " << mysqlpp::quote << collection_id << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlushLSN: " << updateTableFlagQuery.str(); @@ -973,7 +978,7 @@ MySQLMetaImpl::UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_l } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully update table flush_lsn, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully update collection 
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLUSH_LSN", e.what());
     }
@@ -982,12 +987,12 @@ MySQLMetaImpl::UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_l
 }

 Status
-MySQLMetaImpl::GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) {
+MySQLMetaImpl::GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) {
     return Status::OK();
 }

 Status
-MySQLMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) {
+MySQLMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) {
     table_files.clear();

     try {
@@ -1011,11 +1016,11 @@
         }  // Scoped Connection

         Status ret;
-        std::map<std::string, TableSchema> groups;
-        TableFileSchema table_file;
+        std::map<std::string, CollectionSchema> groups;
+        SegmentSchema table_file;
         for (auto& resRow : res) {
             table_file.id_ = resRow["id"];  // implicit conversion
-            resRow["table_id"].to_string(table_file.table_id_);
+            resRow["table_id"].to_string(table_file.collection_id_);
             resRow["segment_id"].to_string(table_file.segment_id_);
             table_file.engine_type_ = resRow["engine_type"];
             resRow["file_id"].to_string(table_file.file_id_);
@@ -1025,20 +1030,20 @@
             table_file.date_ = resRow["date"];
             table_file.created_on_ = resRow["created_on"];

-            auto groupItr = groups.find(table_file.table_id_);
+            auto groupItr = groups.find(table_file.collection_id_);
             if (groupItr == groups.end()) {
-                TableSchema table_schema;
-                table_schema.table_id_ = table_file.table_id_;
+                CollectionSchema table_schema;
+                table_schema.collection_id_ = table_file.collection_id_;
                 auto status = DescribeTable(table_schema);
                 if (!status.ok()) {
                     return status;
                 }
-                groups[table_file.table_id_] = table_schema;
+                groups[table_file.collection_id_] = table_schema;
             }
-            table_file.dimension_ = groups[table_file.table_id_].dimension_;
-            table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_;
-            table_file.index_params_ = groups[table_file.table_id_].index_params_;
-            table_file.metric_type_ = groups[table_file.table_id_].metric_type_;
+            table_file.dimension_ = groups[table_file.collection_id_].dimension_;
+            table_file.index_file_size_ = groups[table_file.collection_id_].index_file_size_;
+            table_file.index_params_ = groups[table_file.collection_id_].index_params_;
+            table_file.metric_type_ = groups[table_file.collection_id_].metric_type_;

             auto status = utils::GetTableFilePath(options_, table_file);
             if (!status.ok()) {
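GetTableFilesByFlushLSN() exists for WAL recovery: as the Serialize() comment earlier says, every serialized file carries the LSN of the flush that produced it, so after a crash the garbage files can be identified by their flush_lsn and rolled back. One plausible reading of that selection, sketched with plain structs and a hypothetical FindGarbageFiles helper (an assumption, not this patch's recovery code):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct SegmentFile {
        std::string file_id_;
        uint64_t flush_lsn_ = 0;  // stamped in MemTableFile::Serialize
    };

    // Hypothetical recovery-side helper: files flushed at an LSN beyond what the
    // WAL vouches for as durable are candidates for rollback/deletion.
    std::vector<SegmentFile>
    FindGarbageFiles(const std::vector<SegmentFile>& all, uint64_t durable_lsn) {
        std::vector<SegmentFile> garbage;
        for (const auto& f : all) {
            if (f.flush_lsn_ > durable_lsn) {
                garbage.push_back(f);
            }
        }
        return garbage;
    }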
"MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); @@ -1087,15 +1092,15 @@ MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { if (res.num_rows() == 1) { int state = res[0]["state"]; - if (state == TableSchema::TO_DELETE) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; + if (state == CollectionSchema::TO_DELETE) { + file_schema.file_type_ = SegmentSchema::TO_DELETE; } } else { - file_schema.file_type_ = TableFileSchema::TO_DELETE; + file_schema.file_type_ = SegmentSchema::TO_DELETE; } std::string id = std::to_string(file_schema.id_); - std::string table_id = file_schema.table_id_; + std::string collection_id = file_schema.collection_id_; std::string engine_type = std::to_string(file_schema.engine_type_); std::string file_id = file_schema.file_id_; std::string file_type = std::to_string(file_schema.file_type_); @@ -1105,22 +1110,23 @@ MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { std::string created_on = std::to_string(file_schema.created_on_); std::string date = std::to_string(file_schema.date_); - updateTableFileQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote << table_id - << " ,engine_type = " << engine_type << " ,file_id = " << mysqlpp::quote << file_id - << " ,file_type = " << file_type << " ,file_size = " << file_size - << " ,row_count = " << row_count << " ,updated_time = " << updated_time - << " ,created_on = " << created_on << " ,date = " << date << " WHERE id = " << id - << ";"; + updateTableFileQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote + << collection_id << " ,engine_type = " << engine_type + << " ,file_id = " << mysqlpp::quote << file_id << " ,file_type = " << file_type + << " ,file_size = " << file_size << " ,row_count = " << row_count + << " ,updated_time = " << updated_time << " ,created_on = " << created_on + << " ,date = " << date << " WHERE id = " << id << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); if (!updateTableFileQuery.exec()) { - ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_; + ENGINE_LOG_DEBUG << "collection_id= " << file_schema.collection_id_ + << " file_id=" << file_schema.file_id_; return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error()); } } // Scoped Connection - ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; + ENGINE_LOG_DEBUG << "Update single collection file, file id = " << file_schema.file_id_; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILE", e.what()); } @@ -1129,7 +1135,7 @@ MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { } Status -MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { +MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) { try { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); @@ -1143,10 +1149,10 @@ MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { mysqlpp::Query updateTableFilesToIndexQuery = connectionPtr->query(); updateTableFilesToIndexQuery << "UPDATE " << META_TABLEFILES - << " SET file_type = " << std::to_string(TableFileSchema::TO_INDEX) - << " WHERE table_id = " << mysqlpp::quote << table_id + << " SET file_type = " << std::to_string(SegmentSchema::TO_INDEX) + << " WHERE table_id = " << mysqlpp::quote << collection_id << " AND row_count >= " << std::to_string(meta::BUILD_INDEX_THRESHOLD) - << " AND 
file_type = " << std::to_string(TableFileSchema::RAW) << ";"; + << " AND file_type = " << std::to_string(SegmentSchema::RAW) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesToIndex: " << updateTableFilesToIndexQuery.str(); @@ -1155,7 +1161,7 @@ MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { updateTableFilesToIndexQuery.error()); } - ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; + ENGINE_LOG_DEBUG << "Update files to to_index, collection id = " << collection_id; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES TO INDEX", e.what()); } @@ -1164,7 +1170,7 @@ MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { } Status -MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { +MySQLMetaImpl::UpdateTableFiles(SegmentsSchema& files) { try { server::MetricCollector metric; { @@ -1181,14 +1187,14 @@ MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { std::map has_tables; for (auto& file_schema : files) { - if (has_tables.find(file_schema.table_id_) != has_tables.end()) { + if (has_tables.find(file_schema.collection_id_) != has_tables.end()) { continue; } updateTableFilesQuery << "SELECT EXISTS" << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote - << file_schema.table_id_ << " AND state <> " - << std::to_string(TableSchema::TO_DELETE) << ")" + << file_schema.collection_id_ << " AND state <> " + << std::to_string(CollectionSchema::TO_DELETE) << ")" << " AS " << mysqlpp::quote << "check" << ";"; @@ -1197,17 +1203,17 @@ MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { mysqlpp::StoreQueryResult res = updateTableFilesQuery.store(); int check = res[0]["check"]; - has_tables[file_schema.table_id_] = (check == 1); + has_tables[file_schema.collection_id_] = (check == 1); } for (auto& file_schema : files) { - if (!has_tables[file_schema.table_id_]) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; + if (!has_tables[file_schema.collection_id_]) { + file_schema.file_type_ = SegmentSchema::TO_DELETE; } file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); std::string id = std::to_string(file_schema.id_); - std::string& table_id = file_schema.table_id_; + std::string& collection_id = file_schema.collection_id_; std::string engine_type = std::to_string(file_schema.engine_type_); std::string& file_id = file_schema.file_id_; std::string file_type = std::to_string(file_schema.file_type_); @@ -1218,7 +1224,7 @@ MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { std::string date = std::to_string(file_schema.date_); updateTableFilesQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote - << table_id << " ,engine_type = " << engine_type + << collection_id << " ,engine_type = " << engine_type << " ,file_id = " << mysqlpp::quote << file_id << " ,file_type = " << file_type << " ,file_size = " << file_size << " ,row_count = " << row_count << " ,updated_time = " << updated_time << " ,created_on = " << created_on @@ -1232,7 +1238,7 @@ MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { } } // Scoped Connection - ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; + ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files"; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES", e.what()); } @@ -1241,7 +1247,7 @@ MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { } Status -MySQLMetaImpl::UpdateTableFilesRowCount(TableFilesSchema& files) { 
+MySQLMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) {
     try {
         server::MetricCollector metric;
         {
@@ -1272,7 +1278,7 @@
             }
         }  // Scoped Connection

-        ENGINE_LOG_DEBUG << "Update " << files.size() << " table files";
+        ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files";
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES ROW COUNT", e.what());
     }
@@ -1281,7 +1287,7 @@
 }

 Status
-MySQLMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) {
+MySQLMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex& index) {
     try {
         server::MetricCollector metric;

@@ -1297,8 +1303,9 @@
         mysqlpp::Query describeTableIndexQuery = connectionPtr->query();
         describeTableIndexQuery << "SELECT engine_type, index_params, index_file_size, metric_type"
-                                << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id
-                                << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";";
+                                << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
+                                << collection_id << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE)
+                                << ";";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTableIndex: " << describeTableIndexQuery.str();

@@ -1313,7 +1320,7 @@
             index.extra_params_ = milvus::json::parse(str_index_params);
             index.metric_type_ = resRow["metric_type"];
         } else {
-            return Status(DB_NOT_FOUND, "Table " + table_id + " not found");
+            return Status(DB_NOT_FOUND, "Collection " + collection_id + " not found");
         }
     }  // Scoped Connection
     } catch (std::exception& e) {
@@ -1324,7 +1331,7 @@
 }

 Status
-MySQLMetaImpl::DropTableIndex(const std::string& table_id) {
+MySQLMetaImpl::DropTableIndex(const std::string& collection_id) {
     try {
         server::MetricCollector metric;

@@ -1342,10 +1349,10 @@
         // soft delete index files
         dropTableIndexQuery << "UPDATE " << META_TABLEFILES
-                            << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE)
+                            << " SET file_type = " << std::to_string(SegmentSchema::TO_DELETE)
                             << " ,updated_time = " << utils::GetMicroSecTimeStamp()
-                            << " WHERE table_id = " << mysqlpp::quote << table_id
-                            << " AND file_type = " << std::to_string(TableFileSchema::INDEX) << ";";
+                            << " WHERE table_id = " << mysqlpp::quote << collection_id
+                            << " AND file_type = " << std::to_string(SegmentSchema::INDEX) << ";";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str();

@@ -1355,10 +1362,10 @@
         // set all backup file to raw
         dropTableIndexQuery << "UPDATE " << META_TABLEFILES
-                            << " SET file_type = " << std::to_string(TableFileSchema::RAW)
+                            << " SET file_type = " << std::to_string(SegmentSchema::RAW)
                             << " ,updated_time = " << utils::GetMicroSecTimeStamp()
-                            << " WHERE table_id = " << mysqlpp::quote << table_id
-                            << " AND file_type = " << std::to_string(TableFileSchema::BACKUP) << ";";
+                            << " WHERE table_id = " << mysqlpp::quote << collection_id
+                            << " AND file_type = " << std::to_string(SegmentSchema::BACKUP) << ";";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str();
@@ -1366,7 +1373,7 @@
             return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error());
         }

-        // set table index type to raw
+        // set collection index type to raw
         dropTableIndexQuery << "UPDATE " << META_TABLES << " SET engine_type = "
                             << " (CASE"
                             << " WHEN metric_type in (" << (int32_t)MetricType::HAMMING << " ,"
@@ -1374,7 +1381,7 @@
                             << " THEN " << (int32_t)EngineType::FAISS_BIN_IDMAP
                             << " ELSE " << (int32_t)EngineType::FAISS_IDMAP << " END)"
                             << " , index_params = '{}'"
-                            << " WHERE table_id = " << mysqlpp::quote << table_id << ";";
+                            << " WHERE table_id = " << mysqlpp::quote << collection_id << ";";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str();

@@ -1383,7 +1390,7 @@
         }
     }  // Scoped Connection

-        ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id;
+        ENGINE_LOG_DEBUG << "Successfully drop collection index, collection id = " << collection_id;
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN DROPPING TABLE INDEX", e.what());
     }
@@ -1392,12 +1399,12 @@
 }

 Status
-MySQLMetaImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag,
-                               uint64_t lsn) {
+MySQLMetaImpl::CreatePartition(const std::string& collection_id, const std::string& partition_name,
+                               const std::string& tag, uint64_t lsn) {
     server::MetricCollector metric;

-    TableSchema table_schema;
-    table_schema.table_id_ = table_id;
+    CollectionSchema table_schema;
+    table_schema.collection_id_ = collection_id;
     auto status = DescribeTable(table_schema);
     if (!status.ok()) {
         return status;
@@ -1415,22 +1422,22 @@
     // not allow duplicated partition
     std::string exist_partition;
-    GetPartitionName(table_id, valid_tag, exist_partition);
+    GetPartitionName(collection_id, valid_tag, exist_partition);
     if (!exist_partition.empty()) {
         return Status(DB_ERROR, "Duplicate partition is not allowed");
     }

     if (partition_name == "") {
         // generate unique partition name
-        NextTableId(table_schema.table_id_);
+        NextTableId(table_schema.collection_id_);
     } else {
-        table_schema.table_id_ = partition_name;
+        table_schema.collection_id_ = partition_name;
     }

     table_schema.id_ = -1;
     table_schema.flag_ = 0;
     table_schema.created_on_ = utils::GetMicroSecTimeStamp();
-    table_schema.owner_table_ = table_id;
+    table_schema.owner_table_ = collection_id;
     table_schema.partition_tag_ = valid_tag;
     table_schema.flush_lsn_ = lsn;

@@ -1449,7 +1456,8 @@ MySQLMetaImpl::DropPartition(const std::string& partition_name) {
 }

 Status
-MySQLMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) {
+MySQLMetaImpl::ShowPartitions(const std::string& collection_id,
+                              std::vector<meta::CollectionSchema>& partition_schema_array) {
     try {
         server::MetricCollector metric;
         mysqlpp::StoreQueryResult res;
@@ -1466,8 +1474,8 @@
            mysqlpp::Query allPartitionsQuery = connectionPtr->query();
             allPartitionsQuery << "SELECT table_id, id, state, dimension, created_on, flag, index_file_size,"
                                << " engine_type, index_params, metric_type, partition_tag, version FROM " << META_TABLES
-                               << " WHERE owner_table = " << mysqlpp::quote << table_id << " AND state <> "
-                               << std::to_string(TableSchema::TO_DELETE) << ";";
+                               << " WHERE owner_table = " << mysqlpp::quote << collection_id << " AND state <> "
+                               << std::to_string(CollectionSchema::TO_DELETE) << ";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allPartitionsQuery.str();

@@ -1475,8 +1483,8 @@ MySQLMetaImpl::ShowPartitions(const std::string& table_id, std::vector
            mysqlpp::Query allPartitionsQuery = connectionPtr->query();
             allPartitionsQuery << "SELECT table_id FROM " << META_TABLES << " WHERE owner_table = " << mysqlpp::quote
-                               << table_id << " AND partition_tag = " << mysqlpp::quote << valid_tag << " AND state <> "
-                               << std::to_string(TableSchema::TO_DELETE) << ";";
+                               << collection_id << " AND partition_tag = " << mysqlpp::quote << valid_tag
+                               << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ";";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allPartitionsQuery.str();

@@ -1534,7 +1542,7 @@ MySQLMetaImpl::GetPartitionName(const std::string& table_id, const std::string&
             const mysqlpp::Row& resRow = res[0];
             resRow["table_id"].to_string(partition_name);
         } else {
-            return Status(DB_NOT_FOUND, "Partition " + valid_tag + " of table " + table_id + " not found");
+            return Status(DB_NOT_FOUND, "Partition " + valid_tag + " of collection " + collection_id + " not found");
         }
     } catch (std::exception& e) {
         return HandleException("GENERAL ERROR WHEN GET PARTITION NAME", e.what());
@@ -1544,7 +1552,7 @@
 }

 Status
-MySQLMetaImpl::FilesToSearch(const std::string& table_id, TableFilesSchema& files) {
+MySQLMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema& files) {
     files.clear();

     try {
@@ -1563,21 +1571,21 @@
            mysqlpp::Query filesToSearchQuery = connectionPtr->query();
             filesToSearchQuery
                 << "SELECT id, table_id, segment_id, engine_type, file_id, file_type, file_size, row_count, date"
-                << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id;
+                << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id;

             // End
             filesToSearchQuery << " AND"
-                               << " (file_type = " << std::to_string(TableFileSchema::RAW)
-                               << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX)
-                               << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
+                               << " (file_type = " << std::to_string(SegmentSchema::RAW)
+                               << " OR file_type = " << std::to_string(SegmentSchema::TO_INDEX)
+                               << " OR file_type = " << std::to_string(SegmentSchema::INDEX) << ");";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToSearch: " << filesToSearchQuery.str();

             res = filesToSearchQuery.store();
         }  // Scoped Connection

-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         auto status = DescribeTable(table_schema);
         if (!status.ok()) {
             return status;
@@ -1585,9 +1593,9 @@
         Status ret;
         for (auto& resRow : res) {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             table_file.id_ = resRow["id"];  // implicit conversion
-            resRow["table_id"].to_string(table_file.table_id_);
+            resRow["table_id"].to_string(table_file.collection_id_);
             resRow["segment_id"].to_string(table_file.segment_id_);
             table_file.index_file_size_ = table_schema.index_file_size_;
             table_file.engine_type_ = resRow["engine_type"];
@@ -1618,15 +1626,15 @@ MySQLMetaImpl::FilesToSearch(const std::string& table_id, TableFilesSchema& file
 }

 Status
-MySQLMetaImpl::FilesToMerge(const std::string& table_id, TableFilesSchema& files) {
+MySQLMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& files) {
     files.clear();

     try {
         server::MetricCollector metric;

-        // check table existence
-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        // check collection existence
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         auto status = DescribeTable(table_schema);
         if (!status.ok()) {
             return status;
@@ -1646,8 +1654,8 @@
            mysqlpp::Query filesToMergeQuery = connectionPtr->query();
             filesToMergeQuery << "SELECT id, table_id, segment_id, file_id, file_type, file_size, row_count, date, "
                                  "engine_type, created_on"
-                              << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id
-                              << " AND file_type = " << std::to_string(TableFileSchema::RAW)
+                              << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id
+                              << " AND file_type = " << std::to_string(SegmentSchema::RAW)
                               << " ORDER BY row_count DESC;";

             ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToMerge: " << filesToMergeQuery.str();

@@ -1658,14 +1666,14 @@
         Status ret;
         int64_t to_merge_files = 0;
         for (auto& resRow : res) {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             table_file.file_size_ = resRow["file_size"];
             if (table_file.file_size_ >= table_schema.index_file_size_) {
                 continue;  // skip large file
             }

             table_file.id_ = resRow["id"];  // implicit conversion
-            resRow["table_id"].to_string(table_file.table_id_);
+            resRow["table_id"].to_string(table_file.collection_id_);
             resRow["segment_id"].to_string(table_file.segment_id_);
             resRow["file_id"].to_string(table_file.file_id_);
             table_file.file_type_ = resRow["file_type"];
@@ -1697,7 +1705,7 @@
 }

 Status
-MySQLMetaImpl::FilesToIndex(TableFilesSchema& files) {
+MySQLMetaImpl::FilesToIndex(SegmentsSchema& files) {
     files.clear();

     try {
@@ -1717,7 +1725,7 @@
         filesToIndexQuery << "SELECT id, table_id, segment_id, engine_type, file_id, file_type, file_size, "
                              "row_count, date, created_on"
                           << " FROM " << META_TABLEFILES
-                          << " WHERE file_type = " << std::to_string(TableFileSchema::TO_INDEX) << ";";
+                          << " WHERE file_type = " << std::to_string(SegmentSchema::TO_INDEX) << ";";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToIndex: " << filesToIndexQuery.str();

@@ -1725,11 +1733,11 @@
     }  // Scoped Connection

         Status ret;
-        std::map<std::string, TableSchema> groups;
-        TableFileSchema table_file;
+        std::map<std::string, CollectionSchema> groups;
+        SegmentSchema table_file;
         for (auto& resRow : res) {
             table_file.id_ = resRow["id"];  // implicit conversion
-            resRow["table_id"].to_string(table_file.table_id_);
+            resRow["table_id"].to_string(table_file.collection_id_);
             resRow["segment_id"].to_string(table_file.segment_id_);
             table_file.engine_type_ = resRow["engine_type"];
             resRow["file_id"].to_string(table_file.file_id_);
@@ -1739,20 +1747,20 @@
             table_file.date_ = resRow["date"];
             table_file.created_on_ = resRow["created_on"];

-            auto groupItr = groups.find(table_file.table_id_);
+            auto groupItr = groups.find(table_file.collection_id_);
             if (groupItr == groups.end()) {
-                TableSchema table_schema;
-                table_schema.table_id_ = table_file.table_id_;
+                CollectionSchema table_schema;
+                table_schema.collection_id_ = table_file.collection_id_;
                 auto status = DescribeTable(table_schema);
                 if (!status.ok()) {
                     return status;
                 }
-                groups[table_file.table_id_] = table_schema;
+                groups[table_file.collection_id_] = table_schema;
             }
-            table_file.dimension_ = groups[table_file.table_id_].dimension_;
-            table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_;
-            table_file.index_params_ = groups[table_file.table_id_].index_params_;
-            table_file.metric_type_ = groups[table_file.table_id_].metric_type_;
+            table_file.dimension_ = groups[table_file.collection_id_].dimension_;
+            table_file.index_file_size_ = groups[table_file.collection_id_].index_file_size_;
+            table_file.index_params_ = groups[table_file.collection_id_].index_params_;
+            table_file.metric_type_ = groups[table_file.collection_id_].metric_type_;

             auto status = utils::GetTableFilePath(options_, table_file);
             if (!status.ok()) {
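FilesToIndex (like GetTableFilesByFlushLSN earlier) avoids one DescribeTable round-trip per file by memoizing schemas in a map keyed by collection_id and copying the cached dimension/index_file_size/index_params/metric_type onto each file. The pattern in isolation, with a trivial LookupSchema stand-in for the MySQL-backed DescribeTable (a sketch, not this file's code):

    #include <map>
    #include <string>

    struct Schema {
        uint16_t dimension_ = 0;
    };

    // Stand-in for DescribeTable; assume the real call is an expensive query.
    Schema LookupSchema(const std::string& collection_id) {
        return Schema{128};  // dummy result for illustration
    }

    // Memoized fetch, mirroring the `groups` map used in FilesToIndex.
    const Schema&
    GetSchemaCached(std::map<std::string, Schema>& groups, const std::string& collection_id) {
        auto it = groups.find(collection_id);
        if (it == groups.end()) {
            it = groups.emplace(collection_id, LookupSchema(collection_id)).first;
        }
        return it->second;  // later files of the same collection hit the cache
    }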
@@ -1772,7 +1780,8 @@
 }

 Status
-MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) {
+MySQLMetaImpl::FilesByType(const std::string& collection_id, const std::vector<int>& file_types,
+                           SegmentsSchema& files) {
     if (file_types.empty()) {
         return Status(DB_ERROR, "file types array is empty");
     }
@@ -1802,10 +1811,10 @@
         }

         mysqlpp::Query hasNonIndexFilesQuery = connectionPtr->query();
-        // since table_id is a unique column we just need to check whether it exists or not
+        // since collection_id is a unique column we just need to check whether it exists or not
         hasNonIndexFilesQuery
             << "SELECT id, segment_id, engine_type, file_id, file_type, file_size, row_count, date, created_on"
-            << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id
+            << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id
             << " AND file_type in (" << types << ");";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesByType: " << hasNonIndexFilesQuery.str();

         res = hasNonIndexFilesQuery.store();
     }  // Scoped Connection

-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         auto status = DescribeTable(table_schema);
         if (!status.ok()) {
             return status;
@@ -1824,9 +1833,9 @@
         int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0;
         int to_index_count = 0, index_count = 0, backup_count = 0;
         for (auto& resRow : res) {
-            TableFileSchema file_schema;
+            SegmentSchema file_schema;
             file_schema.id_ = resRow["id"];
-            file_schema.table_id_ = table_id;
+            file_schema.collection_id_ = collection_id;
             resRow["segment_id"].to_string(file_schema.segment_id_);
             file_schema.engine_type_ = resRow["engine_type"];
             resRow["file_id"].to_string(file_schema.file_id_);
@@ -1850,25 +1859,25 @@
             int32_t file_type = resRow["file_type"];
             switch (file_type) {
-                case (int)TableFileSchema::RAW:
+                case (int)SegmentSchema::RAW:
                     ++raw_count;
                     break;
-                case (int)TableFileSchema::NEW:
+                case (int)SegmentSchema::NEW:
                     ++new_count;
                     break;
-                case (int)TableFileSchema::NEW_MERGE:
+                case (int)SegmentSchema::NEW_MERGE:
                     ++new_merge_count;
                     break;
-                case (int)TableFileSchema::NEW_INDEX:
+                case (int)SegmentSchema::NEW_INDEX:
                     ++new_index_count;
                     break;
-                case (int)TableFileSchema::TO_INDEX:
+                case (int)SegmentSchema::TO_INDEX:
                     ++to_index_count;
                     break;
-                case (int)TableFileSchema::INDEX:
+                case (int)SegmentSchema::INDEX:
                     ++index_count;
                     break;
-                case (int)TableFileSchema::BACKUP:
+                case (int)SegmentSchema::BACKUP:
                     ++backup_count;
                     break;
                 default:
@@ -1876,28 +1885,28 @@
             }
         }

-        std::string msg = "Get table files by type.";
+        std::string msg = "Get collection files by type.";
         for (int file_type : file_types) {
             switch (file_type) {
-                case (int)TableFileSchema::RAW:
+                case (int)SegmentSchema::RAW:
                     msg = msg + " raw files:" + std::to_string(raw_count);
                     break;
-                case (int)TableFileSchema::NEW:
+                case (int)SegmentSchema::NEW:
                     msg = msg + " new files:" + std::to_string(new_count);
                     break;
-                case (int)TableFileSchema::NEW_MERGE:
+                case (int)SegmentSchema::NEW_MERGE:
                     msg = msg + " new_merge files:" + std::to_string(new_merge_count);
                     break;
-                case (int)TableFileSchema::NEW_INDEX:
+                case (int)SegmentSchema::NEW_INDEX:
                     msg = msg + " new_index files:" + std::to_string(new_index_count);
                     break;
-                case (int)TableFileSchema::TO_INDEX:
+                case (int)SegmentSchema::TO_INDEX:
                     msg = msg + " to_index files:" + std::to_string(to_index_count);
                     break;
-                case (int)TableFileSchema::INDEX:
+                case (int)SegmentSchema::INDEX:
                     msg = msg + " index files:" + std::to_string(index_count);
                     break;
-                case (int)TableFileSchema::BACKUP:
+                case (int)SegmentSchema::BACKUP:
                     msg = msg + " backup files:" + std::to_string(backup_count);
                     break;
                 default:
@@ -1914,7 +1923,7 @@
 }

 Status
-MySQLMetaImpl::FilesByID(const std::vector<size_t>& ids, TableFilesSchema& files) {
+MySQLMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files) {
     files.clear();

     if (ids.empty()) {
@@ -1950,21 +1959,21 @@
         // End
         filesToSearchQuery << " AND"
-                           << " (file_type = " << std::to_string(TableFileSchema::RAW)
-                           << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX)
-                           << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
+                           << " (file_type = " << std::to_string(SegmentSchema::RAW)
+                           << " OR file_type = " << std::to_string(SegmentSchema::TO_INDEX)
+                           << " OR file_type = " << std::to_string(SegmentSchema::INDEX) << ");";

         ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToSearch: " << filesToSearchQuery.str();

         res = filesToSearchQuery.store();
     }  // Scoped Connection

-        std::map<std::string, TableSchema> tables;
+        std::map<std::string, CollectionSchema> tables;
         Status ret;
         for (auto& resRow : res) {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             table_file.id_ = resRow["id"];  // implicit conversion
-            resRow["table_id"].to_string(table_file.table_id_);
+            resRow["table_id"].to_string(table_file.collection_id_);
             resRow["segment_id"].to_string(table_file.segment_id_);
             table_file.engine_type_ = resRow["engine_type"];
             resRow["file_id"].to_string(table_file.file_id_);
@@ -1973,14 +1982,14 @@
             table_file.row_count_ = resRow["row_count"];
             table_file.date_ = resRow["date"];

-            if (tables.find(table_file.table_id_) == tables.end()) {
-                TableSchema table_schema;
-                table_schema.table_id_ = table_file.table_id_;
+            if (tables.find(table_file.collection_id_) == tables.end()) {
+                CollectionSchema table_schema;
+                table_schema.collection_id_ = table_file.collection_id_;
table_file.collection_id_; auto status = DescribeTable(table_schema); if (!status.ok()) { return status; } - tables.insert(std::make_pair(table_file.table_id_, table_schema)); + tables.insert(std::make_pair(table_file.collection_id_, table_schema)); } auto status = utils::GetTableFilePath(options_, table_file); @@ -1992,7 +2001,7 @@ MySQLMetaImpl::FilesByID(const std::vector& ids, TableFilesSchema& files } for (auto& table_file : files) { - TableSchema& table_schema = tables[table_file.table_id_]; + CollectionSchema& table_schema = tables[table_file.collection_id_]; table_file.dimension_ = table_schema.dimension_; table_file.index_file_size_ = table_schema.index_file_size_; table_file.index_params_ = table_schema.index_params_; @@ -2038,9 +2047,9 @@ MySQLMetaImpl::Archive() { mysqlpp::Query archiveQuery = connectionPtr->query(); archiveQuery << "UPDATE " << META_TABLEFILES - << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " SET file_type = " << std::to_string(SegmentSchema::TO_DELETE) << " WHERE created_on < " << std::to_string(now - usecs) << " AND file_type <> " - << std::to_string(TableFileSchema::TO_DELETE) << ";"; + << std::to_string(SegmentSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Archive: " << archiveQuery.str(); @@ -2086,7 +2095,7 @@ MySQLMetaImpl::Size(uint64_t& result) { mysqlpp::Query getSizeQuery = connectionPtr->query(); getSizeQuery << "SELECT IFNULL(SUM(file_size),0) AS sum" << " FROM " << META_TABLEFILES << " WHERE file_type <> " - << std::to_string(TableFileSchema::TO_DELETE) << ";"; + << std::to_string(SegmentSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Size: " << getSizeQuery.str(); @@ -2128,10 +2137,10 @@ MySQLMetaImpl::CleanUpShadowFiles() { mysqlpp::StoreQueryResult res = cleanUpQuery.store(); if (!res.empty()) { - ENGINE_LOG_DEBUG << "Remove table file type as NEW"; + ENGINE_LOG_DEBUG << "Remove collection file type as NEW"; cleanUpQuery << "DELETE FROM " << META_TABLEFILES << " WHERE file_type IN (" - << std::to_string(TableFileSchema::NEW) << "," << std::to_string(TableFileSchema::NEW_MERGE) - << "," << std::to_string(TableFileSchema::NEW_INDEX) << ");"; + << std::to_string(SegmentSchema::NEW) << "," << std::to_string(SegmentSchema::NEW_MERGE) << "," + << std::to_string(SegmentSchema::NEW_INDEX) << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUp: " << cleanUpQuery.str(); @@ -2154,7 +2163,7 @@ Status MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) { auto now = utils::GetMicroSecTimeStamp(); std::set table_ids; - std::map segment_ids; + std::map segment_ids; // remove to_delete files try { @@ -2173,21 +2182,21 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) mysqlpp::Query query = connectionPtr->query(); query << "SELECT id, table_id, segment_id, engine_type, file_id, file_type, date" - << " FROM " << META_TABLEFILES << " WHERE file_type IN (" - << std::to_string(TableFileSchema::TO_DELETE) << "," << std::to_string(TableFileSchema::BACKUP) << ")" + << " FROM " << META_TABLEFILES << " WHERE file_type IN (" << std::to_string(SegmentSchema::TO_DELETE) + << "," << std::to_string(SegmentSchema::BACKUP) << ")" << " AND updated_time < " << std::to_string(now - seconds * US_PS) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << query.str(); mysqlpp::StoreQueryResult res = query.store(); - TableFileSchema table_file; + SegmentSchema table_file; std::vector delete_ids; int64_t clean_files = 0; for (auto& resRow : res) { 
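// Each row selected above is a file in TO_DELETE or BACKUP state whose updated_time
// fell outside the TTL window; reading the WHERE clause (with US_PS taken as
// microseconds per second):
//
//   expired = file_type in {TO_DELETE, BACKUP}
//             && updated_time < now - seconds * US_PS;
//
// The loop erases each such file's location from the engine cache; for TO_DELETE rows
// it additionally deletes the file from disk, batches the row id for one bulk SQL
// DELETE, and records the collection and segment ids so their folders can be
// inspected (and, if empty, removed) further down.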
table_file.id_ = resRow["id"]; // implicit conversion - resRow["table_id"].to_string(table_file.table_id_); + resRow["table_id"].to_string(table_file.collection_id_); resRow["segment_id"].to_string(table_file.segment_id_); table_file.engine_type_ = resRow["engine_type"]; resRow["file_id"].to_string(table_file.file_id_); @@ -2206,13 +2215,13 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) utils::GetTableFilePath(options_, table_file); server::CommonUtil::EraseFromCache(table_file.location_); - if (table_file.file_type_ == (int)TableFileSchema::TO_DELETE) { + if (table_file.file_type_ == (int)SegmentSchema::TO_DELETE) { // delete file from disk storage utils::DeleteTableFilePath(options_, table_file); ENGINE_LOG_DEBUG << "Remove file id:" << table_file.id_ << " location:" << table_file.location_; delete_ids.emplace_back(std::to_string(table_file.id_)); - table_ids.insert(table_file.table_id_); + table_ids.insert(table_file.collection_id_); segment_ids.insert(std::make_pair(table_file.segment_id_, table_file)); clean_files++; @@ -2262,7 +2271,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) mysqlpp::Query query = connectionPtr->query(); query << "SELECT id, table_id" - << " FROM " << META_TABLES << " WHERE state = " << std::to_string(TableSchema::TO_DELETE) << ";"; + << " FROM " << META_TABLES << " WHERE state = " << std::to_string(CollectionSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << query.str(); @@ -2273,10 +2282,10 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) std::stringstream idsToDeleteSS; for (auto& resRow : res) { size_t id = resRow["id"]; - std::string table_id; - resRow["table_id"].to_string(table_id); + std::string collection_id; + resRow["table_id"].to_string(collection_id); - utils::DeleteTablePath(options_, table_id, false); // only delete empty folder + utils::DeleteTablePath(options_, collection_id, false); // only delete empty folder ++remove_tables; idsToDeleteSS << "id = " << std::to_string(id) << " OR "; } @@ -2299,8 +2308,8 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); } - // remove deleted table folder - // don't remove table folder until all its files has been deleted + // remove deleted collection folder + // don't remove collection folder until all its files has been deleted try { server::MetricCollector metric; @@ -2316,17 +2325,17 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } - for (auto& table_id : table_ids) { + for (auto& collection_id : table_ids) { mysqlpp::Query query = connectionPtr->query(); query << "SELECT file_id" - << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << query.str(); mysqlpp::StoreQueryResult res = query.store(); if (res.empty()) { - utils::DeleteTablePath(options_, table_id); + utils::DeleteTablePath(options_, collection_id); } } @@ -2387,12 +2396,12 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) } Status -MySQLMetaImpl::Count(const std::string& table_id, uint64_t& result) { +MySQLMetaImpl::Count(const std::string& 
collection_id, uint64_t& result) { try { server::MetricCollector metric; - TableSchema table_schema; - table_schema.table_id_ = table_id; + CollectionSchema table_schema; + table_schema.collection_id_ = collection_id; auto status = DescribeTable(table_schema); if (!status.ok()) { @@ -2412,10 +2421,10 @@ MySQLMetaImpl::Count(const std::string& table_id, uint64_t& result) { mysqlpp::Query countQuery = connectionPtr->query(); countQuery << "SELECT row_count" - << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id - << " AND (file_type = " << std::to_string(TableFileSchema::RAW) - << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX) - << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id + << " AND (file_type = " << std::to_string(SegmentSchema::RAW) + << " OR file_type = " << std::to_string(SegmentSchema::TO_INDEX) + << " OR file_type = " << std::to_string(SegmentSchema::INDEX) << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Count: " << countQuery.str(); @@ -2484,7 +2493,7 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { mysqlpp::Query discardFilesQuery = connectionPtr->query(); discardFilesQuery << "SELECT id, file_size" << " FROM " << META_TABLEFILES << " WHERE file_type <> " - << std::to_string(TableFileSchema::TO_DELETE) << " ORDER BY id ASC " + << std::to_string(SegmentSchema::TO_DELETE) << " ORDER BY id ASC " << " LIMIT 10;"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); @@ -2494,7 +2503,7 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { return Status::OK(); } - TableFileSchema table_file; + SegmentSchema table_file; std::stringstream idsToDiscardSS; for (auto& resRow : res) { if (to_discard_size <= 0) { @@ -2512,7 +2521,7 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4); // remove the last " OR " discardFilesQuery << "UPDATE " << META_TABLEFILES - << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " SET file_type = " << std::to_string(SegmentSchema::TO_DELETE) << " ,updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " WHERE " << idsToDiscardStr << ";"; diff --git a/core/src/db/meta/MySQLMetaImpl.h b/core/src/db/meta/MySQLMetaImpl.h index c040db49eb..2d6c7be2ee 100644 --- a/core/src/db/meta/MySQLMetaImpl.h +++ b/core/src/db/meta/MySQLMetaImpl.h @@ -32,92 +32,94 @@ class MySQLMetaImpl : public Meta { ~MySQLMetaImpl(); Status - CreateTable(TableSchema& table_schema) override; + CreateTable(CollectionSchema& table_schema) override; Status - DescribeTable(TableSchema& table_schema) override; + DescribeTable(CollectionSchema& table_schema) override; Status - HasTable(const std::string& table_id, bool& has_or_not) override; + HasTable(const std::string& collection_id, bool& has_or_not) override; Status - AllTables(std::vector& table_schema_array) override; + AllTables(std::vector& table_schema_array) override; Status - DropTable(const std::string& table_id) override; + DropTable(const std::string& collection_id) override; Status - DeleteTableFiles(const std::string& table_id) override; + DeleteTableFiles(const std::string& collection_id) override; Status - CreateTableFile(TableFileSchema& file_schema) override; + CreateTableFile(SegmentSchema& file_schema) override; Status - GetTableFiles(const std::string& table_id, const std::vector& ids, TableFilesSchema& table_files) 
override; + GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids, + SegmentsSchema& table_files) override; Status - GetTableFilesBySegmentId(const std::string& segment_id, TableFilesSchema& table_files) override; + GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override; Status - UpdateTableIndex(const std::string& table_id, const TableIndex& index) override; + UpdateTableIndex(const std::string& collection_id, const TableIndex& index) override; Status - UpdateTableFlag(const std::string& table_id, int64_t flag) override; + UpdateTableFlag(const std::string& collection_id, int64_t flag) override; Status - UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) override; + UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) override; Status - GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) override; + GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override; Status - GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) override; + GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) override; Status - UpdateTableFile(TableFileSchema& file_schema) override; + UpdateTableFile(SegmentSchema& file_schema) override; Status - UpdateTableFilesToIndex(const std::string& table_id) override; + UpdateTableFilesToIndex(const std::string& collection_id) override; Status - UpdateTableFiles(TableFilesSchema& files) override; + UpdateTableFiles(SegmentsSchema& files) override; Status - UpdateTableFilesRowCount(TableFilesSchema& files) override; + UpdateTableFilesRowCount(SegmentsSchema& files) override; Status - DescribeTableIndex(const std::string& table_id, TableIndex& index) override; + DescribeTableIndex(const std::string& collection_id, TableIndex& index) override; Status - DropTableIndex(const std::string& table_id) override; + DropTableIndex(const std::string& collection_id) override; Status - CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag, + CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag, uint64_t lsn) override; Status DropPartition(const std::string& partition_name) override; Status - ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override; + ShowPartitions(const std::string& collection_id, + std::vector<meta::CollectionSchema>& partition_schema_array) override; Status - GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override; + GetPartitionName(const std::string& collection_id, const std::string& tag, std::string& partition_name) override; Status - FilesToSearch(const std::string& table_id, TableFilesSchema& files) override; + FilesToSearch(const std::string& collection_id, SegmentsSchema& files) override; Status - FilesToMerge(const std::string& table_id, TableFilesSchema& files) override; + FilesToMerge(const std::string& collection_id, SegmentsSchema& files) override; Status - FilesToIndex(TableFilesSchema&) override; + FilesToIndex(SegmentsSchema&) override; Status - FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) override; + FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) override; Status - FilesByID(const std::vector<size_t>& ids, TableFilesSchema& table_files) override; + FilesByID(const std::vector<size_t>& ids, SegmentsSchema& table_files) override; Status 
Archive() override; @@ -135,7 +137,7 @@ class MySQLMetaImpl : public Meta { DropAll() override; Status - Count(const std::string& table_id, uint64_t& result) override; + Count(const std::string& collection_id, uint64_t& result) override; Status SetGlobalLastLSN(uint64_t lsn) override; @@ -147,7 +149,7 @@ class MySQLMetaImpl : public Meta { Status NextFileId(std::string& file_id); Status - NextTableId(std::string& table_id); + NextTableId(std::string& collection_id); Status DiscardFiles(int64_t to_discard_size); diff --git a/core/src/db/meta/SqliteMetaImpl.cpp b/core/src/db/meta/SqliteMetaImpl.cpp index b0c762b401..beaa396a12 100644 --- a/core/src/db/meta/SqliteMetaImpl.cpp +++ b/core/src/db/meta/SqliteMetaImpl.cpp @@ -62,30 +62,30 @@ StoragePrototype(const std::string& path) { return make_storage( path, make_table(META_ENVIRONMENT, make_column("global_lsn", &EnvironmentSchema::global_lsn_, default_value(0))), - make_table(META_TABLES, make_column("id", &TableSchema::id_, primary_key()), - make_column("table_id", &TableSchema::table_id_, unique()), - make_column("state", &TableSchema::state_), make_column("dimension", &TableSchema::dimension_), - make_column("created_on", &TableSchema::created_on_), - make_column("flag", &TableSchema::flag_, default_value(0)), - make_column("index_file_size", &TableSchema::index_file_size_), - make_column("engine_type", &TableSchema::engine_type_), - make_column("index_params", &TableSchema::index_params_), - make_column("metric_type", &TableSchema::metric_type_), - make_column("owner_table", &TableSchema::owner_table_, default_value("")), - make_column("partition_tag", &TableSchema::partition_tag_, default_value("")), - make_column("version", &TableSchema::version_, default_value(CURRENT_VERSION)), - make_column("flush_lsn", &TableSchema::flush_lsn_)), + make_table(META_TABLES, make_column("id", &CollectionSchema::id_, primary_key()), + make_column("table_id", &CollectionSchema::collection_id_, unique()), + make_column("state", &CollectionSchema::state_), make_column("dimension", &CollectionSchema::dimension_), + make_column("created_on", &CollectionSchema::created_on_), + make_column("flag", &CollectionSchema::flag_, default_value(0)), + make_column("index_file_size", &CollectionSchema::index_file_size_), + make_column("engine_type", &CollectionSchema::engine_type_), + make_column("index_params", &CollectionSchema::index_params_), + make_column("metric_type", &CollectionSchema::metric_type_), + make_column("owner_table", &CollectionSchema::owner_table_, default_value("")), + make_column("partition_tag", &CollectionSchema::partition_tag_, default_value("")), + make_column("version", &CollectionSchema::version_, default_value(CURRENT_VERSION)), + make_column("flush_lsn", &CollectionSchema::flush_lsn_)), make_table( - META_TABLEFILES, make_column("id", &TableFileSchema::id_, primary_key()), - make_column("table_id", &TableFileSchema::table_id_), - make_column("segment_id", &TableFileSchema::segment_id_, default_value("")), - make_column("engine_type", &TableFileSchema::engine_type_), - make_column("file_id", &TableFileSchema::file_id_), make_column("file_type", &TableFileSchema::file_type_), - make_column("file_size", &TableFileSchema::file_size_, default_value(0)), - make_column("row_count", &TableFileSchema::row_count_, default_value(0)), - make_column("updated_time", &TableFileSchema::updated_time_), - make_column("created_on", &TableFileSchema::created_on_), make_column("date", &TableFileSchema::date_), - make_column("flush_lsn", 
&TableFileSchema::flush_lsn_))); + META_TABLEFILES, make_column("id", &SegmentSchema::id_, primary_key()), + make_column("table_id", &SegmentSchema::collection_id_), + make_column("segment_id", &SegmentSchema::segment_id_, default_value("")), + make_column("engine_type", &SegmentSchema::engine_type_), + make_column("file_id", &SegmentSchema::file_id_), make_column("file_type", &SegmentSchema::file_type_), + make_column("file_size", &SegmentSchema::file_size_, default_value(0)), + make_column("row_count", &SegmentSchema::row_count_, default_value(0)), + make_column("updated_time", &SegmentSchema::updated_time_), + make_column("created_on", &SegmentSchema::created_on_), make_column("date", &SegmentSchema::date_), + make_column("flush_lsn", &SegmentSchema::flush_lsn_))); } using ConnectorT = decltype(StoragePrototype("")); @@ -99,12 +99,12 @@ SqliteMetaImpl::~SqliteMetaImpl() { } Status -SqliteMetaImpl::NextTableId(std::string& table_id) { +SqliteMetaImpl::NextTableId(std::string& collection_id) { std::lock_guard lock(genid_mutex_); // avoid duplicated id std::stringstream ss; SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance(); ss << id_generator.GetNextIDNumber(); - table_id = ss.str(); + collection_id = ss.str(); return Status::OK(); } @@ -164,25 +164,25 @@ SqliteMetaImpl::Initialize() { } Status -SqliteMetaImpl::CreateTable(TableSchema& table_schema) { +SqliteMetaImpl::CreateTable(CollectionSchema& table_schema) { try { server::MetricCollector metric; // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - if (table_schema.table_id_ == "") { - NextTableId(table_schema.table_id_); + if (table_schema.collection_id_ == "") { + NextTableId(table_schema.collection_id_); } else { fiu_do_on("SqliteMetaImpl.CreateTable.throw_exception", throw std::exception()); - auto table = ConnectorPtr->select(columns(&TableSchema::state_), - where(c(&TableSchema::table_id_) == table_schema.table_id_)); - if (table.size() == 1) { - if (TableSchema::TO_DELETE == std::get<0>(table[0])) { - return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); + auto collection = ConnectorPtr->select(columns(&CollectionSchema::state_), + where(c(&CollectionSchema::collection_id_) == table_schema.collection_id_)); + if (collection.size() == 1) { + if (CollectionSchema::TO_DELETE == std::get<0>(collection[0])) { + return Status(DB_ERROR, "Collection already exists and it is in delete state, please wait a second"); } else { // Change from no error to already exist. 
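// Recapping the branch above: an empty collection_id_ makes CreateTable generate an id
// via NextTableId() / SafeIDGenerator, while a caller-supplied id is looked up first,
// and a row still in TO_DELETE state is reported as an error rather than reused.
// A minimal caller sketch (hypothetical values, illustration only; `meta` stands for
// any Meta implementation pointer):
//
//   meta::CollectionSchema schema;
//   schema.collection_id_ = "demo_collection";  // leave empty to auto-generate an id
//   schema.dimension_ = 128;
//   auto status = meta->CreateTable(schema);    // DB_ALREADY_EXIST if present, as below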
- return Status(DB_ALREADY_EXIST, "Table already exists"); + return Status(DB_ALREADY_EXIST, "Collection already exists"); } } } @@ -195,19 +195,19 @@ SqliteMetaImpl::CreateTable(TableSchema& table_schema) { auto id = ConnectorPtr->insert(table_schema); table_schema.id_ = id; } catch (std::exception& e) { - return HandleException("Encounter exception when create table", e.what()); + return HandleException("Encounter exception when create collection", e.what()); } - ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; + ENGINE_LOG_DEBUG << "Successfully create collection: " << table_schema.collection_id_; - return utils::CreateTablePath(options_, table_schema.table_id_); + return utils::CreateTablePath(options_, table_schema.collection_id_); } catch (std::exception& e) { - return HandleException("Encounter exception when create table", e.what()); + return HandleException("Encounter exception when create collection", e.what()); } } Status -SqliteMetaImpl::DescribeTable(TableSchema& table_schema) { +SqliteMetaImpl::DescribeTable(CollectionSchema& table_schema) { try { server::MetricCollector metric; @@ -215,12 +215,12 @@ SqliteMetaImpl::DescribeTable(TableSchema& table_schema) { std::lock_guard meta_lock(meta_mutex_); fiu_do_on("SqliteMetaImpl.DescribeTable.throw_exception", throw std::exception()); auto groups = ConnectorPtr->select( - columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, &TableSchema::created_on_, - &TableSchema::flag_, &TableSchema::index_file_size_, &TableSchema::engine_type_, - &TableSchema::index_params_, &TableSchema::metric_type_, &TableSchema::owner_table_, - &TableSchema::partition_tag_, &TableSchema::version_, &TableSchema::flush_lsn_), - where(c(&TableSchema::table_id_) == table_schema.table_id_ and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + columns(&CollectionSchema::id_, &CollectionSchema::state_, &CollectionSchema::dimension_, &CollectionSchema::created_on_, + &CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::engine_type_, + &CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::owner_table_, + &CollectionSchema::partition_tag_, &CollectionSchema::version_, &CollectionSchema::flush_lsn_), + where(c(&CollectionSchema::collection_id_) == table_schema.collection_id_ and + c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); if (groups.size() == 1) { table_schema.id_ = std::get<0>(groups[0]); @@ -237,63 +237,63 @@ SqliteMetaImpl::DescribeTable(TableSchema& table_schema) { table_schema.version_ = std::get<11>(groups[0]); table_schema.flush_lsn_ = std::get<12>(groups[0]); } else { - return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); + return Status(DB_NOT_FOUND, "Collection " + table_schema.collection_id_ + " not found"); } } catch (std::exception& e) { - return HandleException("Encounter exception when describe table", e.what()); + return HandleException("Encounter exception when describe collection", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { +SqliteMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) { has_or_not = false; try { fiu_do_on("SqliteMetaImpl.HasTable.throw_exception", throw std::exception()); server::MetricCollector metric; auto tables = ConnectorPtr->select( - columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != 
(int)TableSchema::TO_DELETE)); + columns(&CollectionSchema::id_), + where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); if (tables.size() == 1) { has_or_not = true; } else { has_or_not = false; } } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table", e.what()); + return HandleException("Encounter exception when lookup collection", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::AllTables(std::vector& table_schema_array) { +SqliteMetaImpl::AllTables(std::vector& table_schema_array) { try { fiu_do_on("SqliteMetaImpl.AllTables.throw_exception", throw std::exception()); server::MetricCollector metric; auto selected = ConnectorPtr->select( - columns(&TableSchema::id_, &TableSchema::table_id_, &TableSchema::dimension_, &TableSchema::created_on_, - &TableSchema::flag_, &TableSchema::index_file_size_, &TableSchema::engine_type_, - &TableSchema::index_params_, &TableSchema::metric_type_, &TableSchema::owner_table_, - &TableSchema::partition_tag_, &TableSchema::version_, &TableSchema::flush_lsn_), - where(c(&TableSchema::state_) != (int)TableSchema::TO_DELETE and c(&TableSchema::owner_table_) == "")); - for (auto& table : selected) { - TableSchema schema; - schema.id_ = std::get<0>(table); - schema.table_id_ = std::get<1>(table); - schema.dimension_ = std::get<2>(table); - schema.created_on_ = std::get<3>(table); - schema.flag_ = std::get<4>(table); - schema.index_file_size_ = std::get<5>(table); - schema.engine_type_ = std::get<6>(table); - schema.index_params_ = std::get<7>(table); - schema.metric_type_ = std::get<8>(table); - schema.owner_table_ = std::get<9>(table); - schema.partition_tag_ = std::get<10>(table); - schema.version_ = std::get<11>(table); - schema.flush_lsn_ = std::get<12>(table); + columns(&CollectionSchema::id_, &CollectionSchema::collection_id_, &CollectionSchema::dimension_, &CollectionSchema::created_on_, + &CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::engine_type_, + &CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::owner_table_, + &CollectionSchema::partition_tag_, &CollectionSchema::version_, &CollectionSchema::flush_lsn_), + where(c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE and c(&CollectionSchema::owner_table_) == "")); + for (auto& collection : selected) { + CollectionSchema schema; + schema.id_ = std::get<0>(collection); + schema.collection_id_ = std::get<1>(collection); + schema.dimension_ = std::get<2>(collection); + schema.created_on_ = std::get<3>(collection); + schema.flag_ = std::get<4>(collection); + schema.index_file_size_ = std::get<5>(collection); + schema.engine_type_ = std::get<6>(collection); + schema.index_params_ = std::get<7>(collection); + schema.metric_type_ = std::get<8>(collection); + schema.owner_table_ = std::get<9>(collection); + schema.partition_tag_ = std::get<10>(collection); + schema.version_ = std::get<11>(collection); + schema.flush_lsn_ = std::get<12>(collection); table_schema_array.emplace_back(schema); } @@ -305,7 +305,7 @@ SqliteMetaImpl::AllTables(std::vector& table_schema_array) { } Status -SqliteMetaImpl::DropTable(const std::string& table_id) { +SqliteMetaImpl::DropTable(const std::string& collection_id) { try { fiu_do_on("SqliteMetaImpl.DropTable.throw_exception", throw std::exception()); @@ -314,21 +314,21 @@ SqliteMetaImpl::DropTable(const std::string& table_id) { // multi-threads call sqlite update may get 
exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - // soft delete table + // soft delete collection ConnectorPtr->update_all( - set(c(&TableSchema::state_) = (int)TableSchema::TO_DELETE), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + set(c(&CollectionSchema::state_) = (int)CollectionSchema::TO_DELETE), + where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); - ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully delete collection, collection id = " << collection_id; } catch (std::exception& e) { - return HandleException("Encounter exception when delete table", e.what()); + return HandleException("Encounter exception when delete collection", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::DeleteTableFiles(const std::string& table_id) { +SqliteMetaImpl::DeleteTableFiles(const std::string& collection_id) { try { fiu_do_on("SqliteMetaImpl.DeleteTableFiles.throw_exception", throw std::exception()); @@ -337,27 +337,27 @@ SqliteMetaImpl::DeleteTableFiles(const std::string& table_id) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - // soft delete table files - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + // soft delete collection files + ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::TO_DELETE, + c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&SegmentSchema::collection_id_) == collection_id and + c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE)); - ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully delete collection files, collection id = " << collection_id; } catch (std::exception& e) { - return HandleException("Encounter exception when delete table files", e.what()); + return HandleException("Encounter exception when delete collection files", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::CreateTableFile(TableFileSchema& file_schema) { +SqliteMetaImpl::CreateTableFile(SegmentSchema& file_schema) { if (file_schema.date_ == EmptyDate) { file_schema.date_ = utils::GetDate(); } - TableSchema table_schema; - table_schema.table_id_ = file_schema.table_id_; + CollectionSchema table_schema; + table_schema.collection_id_ = file_schema.collection_id_; auto status = DescribeTable(table_schema); if (!status.ok()) { return status; @@ -387,30 +387,30 @@ SqliteMetaImpl::CreateTableFile(TableFileSchema& file_schema) { auto id = ConnectorPtr->insert(file_schema); file_schema.id_ = id; - ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; + ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_; return utils::CreateTableFilePath(options_, file_schema); } catch (std::exception& e) { - return HandleException("Encounter exception when create table file", e.what()); + return HandleException("Encounter exception when create collection file", e.what()); } return Status::OK(); } Status 
-SqliteMetaImpl::GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, - TableFilesSchema& table_files) { +SqliteMetaImpl::GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids, + SegmentsSchema& table_files) { try { fiu_do_on("SqliteMetaImpl.GetTableFiles.throw_exception", throw std::exception()); table_files.clear(); auto files = ConnectorPtr->select( - columns(&TableFileSchema::id_, &TableFileSchema::segment_id_, &TableFileSchema::file_id_, - &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_, - &TableFileSchema::date_, &TableFileSchema::engine_type_, &TableFileSchema::created_on_), - where(c(&TableFileSchema::table_id_) == table_id and in(&TableFileSchema::id_, ids) and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); - TableSchema table_schema; - table_schema.table_id_ = table_id; + columns(&SegmentSchema::id_, &SegmentSchema::segment_id_, &SegmentSchema::file_id_, + &SegmentSchema::file_type_, &SegmentSchema::file_size_, + &SegmentSchema::row_count_, + &SegmentSchema::date_, &SegmentSchema::engine_type_, &SegmentSchema::created_on_), + where(c(&SegmentSchema::collection_id_) == collection_id and in(&SegmentSchema::id_, ids) and + c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE)); + CollectionSchema table_schema; + table_schema.collection_id_ = collection_id; auto status = DescribeTable(table_schema); if (!status.ok()) { return status; @@ -418,8 +418,8 @@ SqliteMetaImpl::GetTableFiles(const std::string& table_id, const std::vector<si for (auto& file : files) { - TableFileSchema file_schema; - file_schema.table_id_ = table_id; + SegmentSchema file_schema; + file_schema.collection_id_ = collection_id; file_schema.id_ = std::get<0>(file); file_schema.segment_id_ = std::get<1>(file); file_schema.file_id_ = std::get<2>(file); @@ -439,37 +439,37 @@ SqliteMetaImpl::GetTableFiles(const std::string& table_id, const std::vector<si table_files.emplace_back(file_schema); } - ENGINE_LOG_DEBUG << "Get table files by id"; + ENGINE_LOG_DEBUG << "Get collection files by id"; return Status::OK(); } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table files", e.what()); + return HandleException("Encounter exception when lookup collection files", e.what()); } } Status -SqliteMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id, - TableFilesSchema& table_files) { +SqliteMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id, + SegmentsSchema& table_files) { try { table_files.clear(); auto files = ConnectorPtr->select( - columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::segment_id_, - &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::file_size_, - &TableFileSchema::row_count_, &TableFileSchema::date_, &TableFileSchema::engine_type_, - &TableFileSchema::created_on_), - where(c(&TableFileSchema::segment_id_) == segment_id and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_, + &SegmentSchema::file_id_, &SegmentSchema::file_type_, &SegmentSchema::file_size_, + &SegmentSchema::row_count_, &SegmentSchema::date_, &SegmentSchema::engine_type_, + &SegmentSchema::created_on_), + where(c(&SegmentSchema::segment_id_) == segment_id and + c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE)); if (!files.empty()) { - TableSchema table_schema; - table_schema.table_id_ = std::get<1>(files[0]); + CollectionSchema table_schema; + table_schema.collection_id_ = std::get<1>(files[0]); auto status = DescribeTable(table_schema); if (!status.ok()) { return status; } for (auto& file : files) { - TableFileSchema file_schema; - file_schema.table_id_ = table_schema.table_id_; + SegmentSchema file_schema; + file_schema.collection_id_ = table_schema.collection_id_; file_schema.id_ = std::get<0>(file); file_schema.segment_id_ = std::get<2>(file); file_schema.file_id_ = std::get<3>(file); @@ -489,24 +489,24 @@ SqliteMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id, } } - ENGINE_LOG_DEBUG << "Get table files by segment id"; + ENGINE_LOG_DEBUG << "Get collection files by segment id"; return Status::OK(); } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table files by segment 
id", e.what()); + return HandleException("Encounter exception when lookup collection files by segment id", e.what()); } } Status -SqliteMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { +SqliteMetaImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.UpdateTableFlag.throw_exception", throw std::exception()); // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableSchema::flag_) = flag), where(c(&TableSchema::table_id_) == table_id)); - ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; + ConnectorPtr->update_all(set(c(&CollectionSchema::flag_) = flag), where(c(&CollectionSchema::collection_id_) == collection_id)); + ENGINE_LOG_DEBUG << "Successfully update collection flag, collection id = " << collection_id; } catch (std::exception& e) { - std::string msg = "Encounter exception when update table flag: table_id = " + table_id; + std::string msg = "Encounter exception when update collection flag: collection_id = " + collection_id; return HandleException(msg, e.what()); } @@ -514,15 +514,15 @@ SqliteMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { } Status -SqliteMetaImpl::UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) { +SqliteMetaImpl::UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) { try { server::MetricCollector metric; - ConnectorPtr->update_all(set(c(&TableSchema::flush_lsn_) = flush_lsn), - where(c(&TableSchema::table_id_) == table_id)); - ENGINE_LOG_DEBUG << "Successfully update table flush_lsn, table id = " << table_id; + ConnectorPtr->update_all(set(c(&CollectionSchema::flush_lsn_) = flush_lsn), + where(c(&CollectionSchema::collection_id_) == collection_id)); + ENGINE_LOG_DEBUG << "Successfully update collection flush_lsn, collection id = " << collection_id; } catch (std::exception& e) { - std::string msg = "Encounter exception when update table lsn: table_id = " + table_id; + std::string msg = "Encounter exception when update collection lsn: collection_id = " + collection_id; return HandleException(msg, e.what()); } @@ -530,47 +530,47 @@ SqliteMetaImpl::UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_ } Status -SqliteMetaImpl::GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) { +SqliteMetaImpl::GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) { try { server::MetricCollector metric; auto selected = - ConnectorPtr->select(columns(&TableSchema::flush_lsn_), where(c(&TableSchema::table_id_) == table_id)); + ConnectorPtr->select(columns(&CollectionSchema::flush_lsn_), where(c(&CollectionSchema::collection_id_) == collection_id)); if (selected.size() > 0) { flush_lsn = std::get<0>(selected[0]); } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + return Status(DB_NOT_FOUND, "Collection " + collection_id + " not found"); } } catch (std::exception& e) { - return HandleException("Encounter exception when getting table files by flush_lsn", e.what()); + return HandleException("Encounter exception when getting collection files by flush_lsn", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) { +SqliteMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) { table_files.clear(); try { server::MetricCollector metric; auto selected = ConnectorPtr->select( - columns(&TableFileSchema::id_, 
&TableFileSchema::table_id_, &TableFileSchema::segment_id_, - &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::file_size_, - &TableFileSchema::row_count_, &TableFileSchema::date_, &TableFileSchema::engine_type_, - &TableFileSchema::created_on_), - where(c(&TableFileSchema::flush_lsn_) == flush_lsn)); + columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_, + &SegmentSchema::file_id_, &SegmentSchema::file_type_, &SegmentSchema::file_size_, + &SegmentSchema::row_count_, &SegmentSchema::date_, &SegmentSchema::engine_type_, + &SegmentSchema::created_on_), + where(c(&SegmentSchema::flush_lsn_) == flush_lsn)); - std::map groups; - TableFileSchema table_file; + std::map groups; + SegmentSchema table_file; Status ret; for (auto& file : selected) { table_file.id_ = std::get<0>(file); - table_file.table_id_ = std::get<1>(file); + table_file.collection_id_ = std::get<1>(file); table_file.segment_id_ = std::get<2>(file); table_file.file_id_ = std::get<3>(file); table_file.file_type_ = std::get<4>(file); @@ -584,20 +584,20 @@ SqliteMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& ta if (!status.ok()) { ret = status; } - auto groupItr = groups.find(table_file.table_id_); + auto groupItr = groups.find(table_file.collection_id_); if (groupItr == groups.end()) { - TableSchema table_schema; - table_schema.table_id_ = table_file.table_id_; + CollectionSchema table_schema; + table_schema.collection_id_ = table_file.collection_id_; auto status = DescribeTable(table_schema); if (!status.ok()) { return status; } - groups[table_file.table_id_] = table_schema; + groups[table_file.collection_id_] = table_schema; } - table_file.dimension_ = groups[table_file.table_id_].dimension_; - table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; - table_file.index_params_ = groups[table_file.table_id_].index_params_; - table_file.metric_type_ = groups[table_file.table_id_].metric_type_; + table_file.dimension_ = groups[table_file.collection_id_].dimension_; + table_file.index_file_size_ = groups[table_file.collection_id_].index_file_size_; + table_file.index_params_ = groups[table_file.collection_id_].index_params_; + table_file.metric_type_ = groups[table_file.collection_id_].metric_type_; table_files.push_back(table_file); } @@ -606,12 +606,12 @@ SqliteMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& ta } return ret; } catch (std::exception& e) { - return HandleException("Encounter exception when getting table files by flush_lsn", e.what()); + return HandleException("Encounter exception when getting collection files by flush_lsn", e.what()); } } Status -SqliteMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { +SqliteMetaImpl::UpdateTableFile(SegmentSchema& file_schema) { file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); try { server::MetricCollector metric; @@ -620,28 +620,28 @@ SqliteMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto tables = ConnectorPtr->select(columns(&TableSchema::state_), - where(c(&TableSchema::table_id_) == file_schema.table_id_)); + auto tables = ConnectorPtr->select(columns(&CollectionSchema::state_), + where(c(&CollectionSchema::collection_id_) == file_schema.collection_id_)); - // if the table has been deleted, just mark the table file as TO_DELETE + // if the collection has been deleted, just mark the 
collection file as TO_DELETE // clean thread will delete the file later - if (tables.size() < 1 || std::get<0>(tables[0]) == (int)TableSchema::TO_DELETE) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; + if (tables.size() < 1 || std::get<0>(tables[0]) == (int)CollectionSchema::TO_DELETE) { + file_schema.file_type_ = SegmentSchema::TO_DELETE; } ConnectorPtr->update(file_schema); - ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; + ENGINE_LOG_DEBUG << "Update single collection file, file id = " << file_schema.file_id_; } catch (std::exception& e) { std::string msg = - "Exception update table file: table_id = " + file_schema.table_id_ + " file_id = " + file_schema.file_id_; + "Exception update collection file: collection_id = " + file_schema.collection_id_ + " file_id = " + file_schema.file_id_; return HandleException(msg, e.what()); } return Status::OK(); } Status -SqliteMetaImpl::UpdateTableFiles(TableFilesSchema& files) { +SqliteMetaImpl::UpdateTableFiles(SegmentsSchema& files) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.UpdateTableFiles.throw_exception", throw std::exception()); @@ -651,23 +651,23 @@ SqliteMetaImpl::UpdateTableFiles(TableFilesSchema& files) { std::map has_tables; for (auto& file : files) { - if (has_tables.find(file.table_id_) != has_tables.end()) { + if (has_tables.find(file.collection_id_) != has_tables.end()) { continue; } - auto tables = ConnectorPtr->select(columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == file.table_id_ and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + auto tables = ConnectorPtr->select(columns(&CollectionSchema::id_), + where(c(&CollectionSchema::collection_id_) == file.collection_id_ and + c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); if (tables.size() >= 1) { - has_tables[file.table_id_] = true; + has_tables[file.collection_id_] = true; } else { - has_tables[file.table_id_] = false; + has_tables[file.collection_id_] = false; } } auto commited = ConnectorPtr->transaction([&]() mutable { for (auto& file : files) { - if (!has_tables[file.table_id_]) { - file.file_type_ = TableFileSchema::TO_DELETE; + if (!has_tables[file.collection_id_]) { + file.file_type_ = SegmentSchema::TO_DELETE; } file.updated_time_ = utils::GetMicroSecTimeStamp(); @@ -681,15 +681,15 @@ SqliteMetaImpl::UpdateTableFiles(TableFilesSchema& files) { return HandleException("UpdateTableFiles error: sqlite transaction failed"); } - ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; + ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files"; } catch (std::exception& e) { - return HandleException("Encounter exception when update table files", e.what()); + return HandleException("Encounter exception when update collection files", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::UpdateTableFilesRowCount(TableFilesSchema& files) { +SqliteMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) { try { server::MetricCollector metric; @@ -697,19 +697,19 @@ SqliteMetaImpl::UpdateTableFilesRowCount(TableFilesSchema& files) { std::lock_guard meta_lock(meta_mutex_); for (auto& file : files) { - ConnectorPtr->update_all(set(c(&TableFileSchema::row_count_) = file.row_count_, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::file_id_) == file.file_id_)); + ConnectorPtr->update_all(set(c(&SegmentSchema::row_count_) = file.row_count_, + c(&SegmentSchema::updated_time_) = 
utils::GetMicroSecTimeStamp()), + where(c(&SegmentSchema::file_id_) == file.file_id_)); ENGINE_LOG_DEBUG << "Update file " << file.file_id_ << " row count to " << file.row_count_; } } catch (std::exception& e) { - return HandleException("Encounter exception when update table files row count", e.what()); + return HandleException("Encounter exception when update collection files row count", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) { +SqliteMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIndex& index) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.UpdateTableIndex.throw_exception", throw std::exception()); @@ -718,15 +718,15 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& std::lock_guard meta_lock(meta_mutex_); auto tables = ConnectorPtr->select( - columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, &TableSchema::created_on_, - &TableSchema::flag_, &TableSchema::index_file_size_, &TableSchema::owner_table_, - &TableSchema::partition_tag_, &TableSchema::version_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + columns(&CollectionSchema::id_, &CollectionSchema::state_, &CollectionSchema::dimension_, &CollectionSchema::created_on_, + &CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::owner_table_, + &CollectionSchema::partition_tag_, &CollectionSchema::version_), + where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); if (tables.size() > 0) { - meta::TableSchema table_schema; + meta::CollectionSchema table_schema; table_schema.id_ = std::get<0>(tables[0]); - table_schema.table_id_ = table_id; + table_schema.collection_id_ = collection_id; table_schema.state_ = std::get<1>(tables[0]); table_schema.dimension_ = std::get<2>(tables[0]); table_schema.created_on_ = std::get<3>(tables[0]); @@ -741,18 +741,18 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& ConnectorPtr->update(table_schema); } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + return Status(DB_NOT_FOUND, "Collection " + collection_id + " not found"); } // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); + ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::RAW, + c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&SegmentSchema::collection_id_) == collection_id and + c(&SegmentSchema::file_type_) == (int)SegmentSchema::BACKUP)); - ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully update collection index, collection id = " << collection_id; } catch (std::exception& e) { - std::string msg = "Encounter exception when update table index: table_id = " + table_id; + std::string msg = "Encounter exception when update collection index: collection_id = " + collection_id; return HandleException(msg, e.what()); } @@ -760,7 +760,7 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& } Status 
-SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { +SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.UpdateTableFilesToIndex.throw_exception", throw std::exception()); @@ -768,35 +768,35 @@ SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_INDEX), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::row_count_) >= meta::BUILD_INDEX_THRESHOLD and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::RAW)); + ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::TO_INDEX), + where(c(&SegmentSchema::collection_id_) == collection_id and + c(&SegmentSchema::row_count_) >= meta::BUILD_INDEX_THRESHOLD and + c(&SegmentSchema::file_type_) == (int)SegmentSchema::RAW)); - ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; + ENGINE_LOG_DEBUG << "Update files to to_index, collection id = " << collection_id; } catch (std::exception& e) { - return HandleException("Encounter exception when update table files to to_index", e.what()); + return HandleException("Encounter exception when update collection files to to_index", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) { +SqliteMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex& index) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.DescribeTableIndex.throw_exception", throw std::exception()); auto groups = ConnectorPtr->select( - columns(&TableSchema::engine_type_, &TableSchema::index_params_, &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + columns(&CollectionSchema::engine_type_, &CollectionSchema::index_params_, &CollectionSchema::metric_type_), + where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); if (groups.size() == 1) { index.engine_type_ = std::get<0>(groups[0]); index.extra_params_ = milvus::json::parse(std::get<1>(groups[0])); index.metric_type_ = std::get<2>(groups[0]); } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + return Status(DB_NOT_FOUND, "Collection " + collection_id + " not found"); } } catch (std::exception& e) { return HandleException("Encounter exception when describe index", e.what()); @@ -806,7 +806,7 @@ SqliteMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& inde } Status -SqliteMetaImpl::DropTableIndex(const std::string& table_id) { +SqliteMetaImpl::DropTableIndex(const std::string& collection_id) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.DropTableIndex.throw_exception", throw std::exception()); @@ -815,20 +815,20 @@ SqliteMetaImpl::DropTableIndex(const std::string& table_id) { std::lock_guard meta_lock(meta_mutex_); // soft delete index files - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::INDEX)); + 
ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::TO_DELETE, + c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&SegmentSchema::collection_id_) == collection_id and + c(&SegmentSchema::file_type_) == (int)SegmentSchema::INDEX)); // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); + ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::RAW, + c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where(c(&SegmentSchema::collection_id_) == collection_id and + c(&SegmentSchema::file_type_) == (int)SegmentSchema::BACKUP)); - // set table index type to raw - auto groups = ConnectorPtr->select(columns(&TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_id)); + // set collection index type to raw + auto groups = ConnectorPtr->select(columns(&CollectionSchema::metric_type_), + where(c(&CollectionSchema::collection_id_) == collection_id)); int32_t raw_engine_type = DEFAULT_ENGINE_TYPE; if (groups.size() == 1) { @@ -838,24 +838,24 @@ SqliteMetaImpl::DropTableIndex(const std::string& table_id) { } } ConnectorPtr->update_all( - set(c(&TableSchema::engine_type_) = raw_engine_type, c(&TableSchema::index_params_) = "{}"), - where(c(&TableSchema::table_id_) == table_id)); + set(c(&CollectionSchema::engine_type_) = raw_engine_type, c(&CollectionSchema::index_params_) = "{}"), + where(c(&CollectionSchema::collection_id_) == collection_id)); - ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; + ENGINE_LOG_DEBUG << "Successfully drop collection index, collection id = " << collection_id; } catch (std::exception& e) { - return HandleException("Encounter exception when delete table index files", e.what()); + return HandleException("Encounter exception when delete collection index files", e.what()); } return Status::OK(); } Status -SqliteMetaImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag, +SqliteMetaImpl::CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag, uint64_t lsn) { server::MetricCollector metric; - TableSchema table_schema; - table_schema.table_id_ = table_id; + CollectionSchema table_schema; + table_schema.collection_id_ = collection_id; auto status = DescribeTable(table_schema); if (!status.ok()) { return status; @@ -873,22 +873,22 @@ SqliteMetaImpl::CreatePartition(const std::string& table_id, const std::string& // not allow duplicated partition std::string exist_partition; - GetPartitionName(table_id, valid_tag, exist_partition); + GetPartitionName(collection_id, valid_tag, exist_partition); if (!exist_partition.empty()) { return Status(DB_ERROR, "Duplicate partition is not allowed"); } if (partition_name == "") { // generate unique partition name - NextTableId(table_schema.table_id_); + NextTableId(table_schema.collection_id_); } else { - table_schema.table_id_ = partition_name; + table_schema.collection_id_ = partition_name; } table_schema.id_ = -1; table_schema.flag_ = 0; table_schema.created_on_ = utils::GetMicroSecTimeStamp(); - table_schema.owner_table_ = table_id; + table_schema.owner_table_ = collection_id; table_schema.partition_tag_ = valid_tag; table_schema.flush_lsn_ 
= lsn; @@ -906,21 +906,21 @@ SqliteMetaImpl::DropPartition(const std::string& partition_name) { } Status -SqliteMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) { +SqliteMetaImpl::ShowPartitions(const std::string& collection_id, std::vector<meta::CollectionSchema>& partition_schema_array) { try { server::MetricCollector metric; fiu_do_on("SqliteMetaImpl.ShowPartitions.throw_exception", throw std::exception()); auto partitions = ConnectorPtr->select( - columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, &TableSchema::created_on_, - &TableSchema::flag_, &TableSchema::index_file_size_, &TableSchema::engine_type_, - &TableSchema::index_params_, &TableSchema::metric_type_, &TableSchema::partition_tag_, - &TableSchema::version_, &TableSchema::table_id_), - where(c(&TableSchema::owner_table_) == table_id and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + columns(&CollectionSchema::id_, &CollectionSchema::state_, &CollectionSchema::dimension_, &CollectionSchema::created_on_, + &CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::engine_type_, + &CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::partition_tag_, + &CollectionSchema::version_, &CollectionSchema::collection_id_), + where(c(&CollectionSchema::owner_table_) == collection_id and + c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); for (size_t i = 0; i < partitions.size(); i++) { - meta::TableSchema partition_schema; + meta::CollectionSchema partition_schema; partition_schema.id_ = std::get<0>(partitions[i]); partition_schema.state_ = std::get<1>(partitions[i]); partition_schema.dimension_ = std::get<2>(partitions[i]); @@ -930,10 +930,10 @@ SqliteMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::Ta partition_schema.engine_type_ = std::get<6>(partitions[i]); partition_schema.index_params_ = std::get<7>(partitions[i]); partition_schema.metric_type_ = std::get<8>(partitions[i]); - partition_schema.owner_table_ = table_id; + partition_schema.owner_table_ = collection_id; partition_schema.partition_tag_ = std::get<9>(partitions[i]); partition_schema.version_ = std::get<10>(partitions[i]); - partition_schema.table_id_ = std::get<11>(partitions[i]); + partition_schema.collection_id_ = std::get<11>(partitions[i]); partition_schema_array.emplace_back(partition_schema); } } catch (std::exception& e) { @@ -944,7 +944,7 @@ SqliteMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::Ta auto name = ConnectorPtr->select( - columns(&TableSchema::table_id_), - where(c(&TableSchema::owner_table_) == table_id and c(&TableSchema::partition_tag_) == valid_tag and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + columns(&CollectionSchema::collection_id_), + where(c(&CollectionSchema::owner_table_) == collection_id and c(&CollectionSchema::partition_tag_) == valid_tag and + c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE)); if (name.size() > 0) { partition_name = std::get<0>(name[0]); } else { - return Status(DB_NOT_FOUND, "Table " + table_id + "'s partition " + valid_tag + " not found"); + return Status(DB_NOT_FOUND, "Collection " + collection_id + "'s partition " + valid_tag + " not found"); } } catch (std::exception& e) { return HandleException("Encounter exception when get partition name", e.what()); @@ -971,7 +971,7 @@ SqliteMetaImpl::GetPartitionName(const std::string& table_id, const std::string& } Status -SqliteMetaImpl::FilesToSearch(const 
     files.clear();
 
     try {
@@ -979,18 +979,18 @@ SqliteMetaImpl::FilesToSearch(const std::string& table_id, TableFilesSchema& fil
         fiu_do_on("SqliteMetaImpl.FilesToSearch.throw_exception", throw std::exception());
 
         auto select_columns =
-            columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::segment_id_,
-                    &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::file_size_,
-                    &TableFileSchema::row_count_, &TableFileSchema::date_, &TableFileSchema::engine_type_);
+            columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_,
+                    &SegmentSchema::file_id_, &SegmentSchema::file_type_, &SegmentSchema::file_size_,
+                    &SegmentSchema::row_count_, &SegmentSchema::date_, &SegmentSchema::engine_type_);
 
-        auto match_tableid = c(&TableFileSchema::table_id_) == table_id;
+        auto match_tableid = c(&SegmentSchema::collection_id_) == collection_id;
 
-        std::vector<int> file_types = {(int)TableFileSchema::RAW, (int)TableFileSchema::TO_INDEX,
-                                       (int)TableFileSchema::INDEX};
-        auto match_type = in(&TableFileSchema::file_type_, file_types);
+        std::vector<int> file_types = {(int)SegmentSchema::RAW, (int)SegmentSchema::TO_INDEX,
+                                       (int)SegmentSchema::INDEX};
+        auto match_type = in(&SegmentSchema::file_type_, file_types);
 
-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         auto status = DescribeTable(table_schema);
         if (!status.ok()) {
             return status;
@@ -1003,9 +1003,9 @@ SqliteMetaImpl::FilesToSearch(const std::string& table_id, TableFilesSchema& fil
         Status ret;
         for (auto& file : selected) {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             table_file.id_ = std::get<0>(file);
-            table_file.table_id_ = std::get<1>(file);
+            table_file.collection_id_ = std::get<1>(file);
             table_file.segment_id_ = std::get<2>(file);
             table_file.file_id_ = std::get<3>(file);
             table_file.file_type_ = std::get<4>(file);
@@ -1026,7 +1026,7 @@ SqliteMetaImpl::FilesToSearch(const std::string& table_id, TableFilesSchema& fil
             files.emplace_back(table_file);
         }
         if (files.empty()) {
-            ENGINE_LOG_ERROR << "No file to search for table: " << table_id;
+            ENGINE_LOG_ERROR << "No file to search for collection: " << collection_id;
         }
 
         if (selected.size() > 0) {
@@ -1039,7 +1039,7 @@ SqliteMetaImpl::FilesToSearch(const std::string& table_id, TableFilesSchema& fil
 }
 
 Status
-SqliteMetaImpl::FilesToMerge(const std::string& table_id, TableFilesSchema& files) {
+SqliteMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& files) {
     files.clear();
 
     try {
@@ -1047,9 +1047,9 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, TableFilesSchema& file
         server::MetricCollector metric;
 
-        // check table existence
-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        // check collection existence
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         auto status = DescribeTable(table_schema);
         if (!status.ok()) {
             return status;
@@ -1057,24 +1057,24 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, TableFilesSchema& file
         // get files to merge
         auto selected = ConnectorPtr->select(
-            columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::segment_id_,
-                    &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::file_size_,
-                    &TableFileSchema::row_count_, &TableFileSchema::date_, &TableFileSchema::created_on_),
-            where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::RAW and
-                  c(&TableFileSchema::table_id_) == table_id),
-            order_by(&TableFileSchema::file_size_).desc());
+            columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_,
+                    &SegmentSchema::file_id_, &SegmentSchema::file_type_, &SegmentSchema::file_size_,
+                    &SegmentSchema::row_count_, &SegmentSchema::date_, &SegmentSchema::created_on_),
+            where(c(&SegmentSchema::file_type_) == (int)SegmentSchema::RAW and
+                  c(&SegmentSchema::collection_id_) == collection_id),
+            order_by(&SegmentSchema::file_size_).desc());
 
         Status result;
         int64_t to_merge_files = 0;
         for (auto& file : selected) {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             table_file.file_size_ = std::get<5>(file);
             if (table_file.file_size_ >= table_schema.index_file_size_) {
                 continue;  // skip large file
             }
 
             table_file.id_ = std::get<0>(file);
-            table_file.table_id_ = std::get<1>(file);
+            table_file.collection_id_ = std::get<1>(file);
             table_file.segment_id_ = std::get<2>(file);
             table_file.file_id_ = std::get<3>(file);
             table_file.file_type_ = std::get<4>(file);
@@ -1105,7 +1105,7 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, TableFilesSchema& file
 }
 
 Status
-SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) {
+SqliteMetaImpl::FilesToIndex(SegmentsSchema& files) {
     files.clear();
 
     try {
@@ -1114,19 +1114,19 @@ SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) {
         server::MetricCollector metric;
 
         auto selected = ConnectorPtr->select(
-            columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::segment_id_,
-                    &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::file_size_,
-                    &TableFileSchema::row_count_, &TableFileSchema::date_, &TableFileSchema::engine_type_,
-                    &TableFileSchema::created_on_),
-            where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_INDEX));
+            columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_,
+                    &SegmentSchema::file_id_, &SegmentSchema::file_type_, &SegmentSchema::file_size_,
+                    &SegmentSchema::row_count_, &SegmentSchema::date_, &SegmentSchema::engine_type_,
+                    &SegmentSchema::created_on_),
+            where(c(&SegmentSchema::file_type_) == (int)SegmentSchema::TO_INDEX));
 
-        std::map<std::string, TableSchema> groups;
-        TableFileSchema table_file;
+        std::map<std::string, CollectionSchema> groups;
+        SegmentSchema table_file;
         Status ret;
         for (auto& file : selected) {
             table_file.id_ = std::get<0>(file);
-            table_file.table_id_ = std::get<1>(file);
+            table_file.collection_id_ = std::get<1>(file);
             table_file.segment_id_ = std::get<2>(file);
             table_file.file_id_ = std::get<3>(file);
             table_file.file_type_ = std::get<4>(file);
@@ -1140,22 +1140,22 @@ SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) {
             if (!status.ok()) {
                 ret = status;
             }
-            auto groupItr = groups.find(table_file.table_id_);
+            auto groupItr = groups.find(table_file.collection_id_);
             if (groupItr == groups.end()) {
-                TableSchema table_schema;
-                table_schema.table_id_ = table_file.table_id_;
+                CollectionSchema table_schema;
+                table_schema.collection_id_ = table_file.collection_id_;
                 auto status = DescribeTable(table_schema);
                 fiu_do_on("SqliteMetaImpl_FilesToIndex_TableNotFound",
-                          status = Status(DB_NOT_FOUND, "table not found"));
+                          status = Status(DB_NOT_FOUND, "collection not found"));
                 if (!status.ok()) {
                     return status;
                 }
-                groups[table_file.table_id_] = table_schema;
+                groups[table_file.collection_id_] = table_schema;
             }
-            table_file.dimension_ = groups[table_file.table_id_].dimension_;
-            table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_;
-            table_file.index_params_ = groups[table_file.table_id_].index_params_;
-            table_file.metric_type_ = groups[table_file.table_id_].metric_type_;
+            table_file.dimension_ = groups[table_file.collection_id_].dimension_;
+            table_file.index_file_size_ = groups[table_file.collection_id_].index_file_size_;
+            table_file.index_params_ = groups[table_file.collection_id_].index_params_;
+            table_file.metric_type_ = groups[table_file.collection_id_].metric_type_;
             files.push_back(table_file);
         }
 
@@ -1169,15 +1169,15 @@ SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) {
 }
 
 Status
-SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) {
+SqliteMetaImpl::FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) {
     if (file_types.empty()) {
         return Status(DB_ERROR, "file types array is empty");
    }
 
     Status ret = Status::OK();
 
-    TableSchema table_schema;
-    table_schema.table_id_ = table_id;
+    CollectionSchema table_schema;
+    table_schema.collection_id_ = collection_id;
     auto status = DescribeTable(table_schema);
     if (!status.ok()) {
         return status;
@@ -1188,17 +1188,17 @@ SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
         files.clear();
 
         auto selected = ConnectorPtr->select(
-            columns(&TableFileSchema::id_, &TableFileSchema::segment_id_, &TableFileSchema::file_id_,
-                    &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_,
-                    &TableFileSchema::date_, &TableFileSchema::engine_type_, &TableFileSchema::created_on_),
-            where(in(&TableFileSchema::file_type_, file_types) and c(&TableFileSchema::table_id_) == table_id));
+            columns(&SegmentSchema::id_, &SegmentSchema::segment_id_, &SegmentSchema::file_id_,
+                    &SegmentSchema::file_type_, &SegmentSchema::file_size_, &SegmentSchema::row_count_,
+                    &SegmentSchema::date_, &SegmentSchema::engine_type_, &SegmentSchema::created_on_),
+            where(in(&SegmentSchema::file_type_, file_types) and c(&SegmentSchema::collection_id_) == collection_id));
 
         if (selected.size() >= 1) {
             int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0;
             int to_index_count = 0, index_count = 0, backup_count = 0;
             for (auto& file : selected) {
-                TableFileSchema file_schema;
-                file_schema.table_id_ = table_id;
+                SegmentSchema file_schema;
+                file_schema.collection_id_ = collection_id;
                 file_schema.id_ = std::get<0>(file);
                 file_schema.segment_id_ = std::get<1>(file);
                 file_schema.file_id_ = std::get<2>(file);
@@ -1215,19 +1215,19 @@ SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
                 file_schema.metric_type_ = table_schema.metric_type_;
 
                 switch (file_schema.file_type_) {
-                    case (int)TableFileSchema::RAW:++raw_count;
+                    case (int)SegmentSchema::RAW:++raw_count;
                         break;
-                    case (int)TableFileSchema::NEW:++new_count;
+                    case (int)SegmentSchema::NEW:++new_count;
                         break;
-                    case (int)TableFileSchema::NEW_MERGE:++new_merge_count;
+                    case (int)SegmentSchema::NEW_MERGE:++new_merge_count;
                         break;
-                    case (int)TableFileSchema::NEW_INDEX:++new_index_count;
+                    case (int)SegmentSchema::NEW_INDEX:++new_index_count;
                         break;
-                    case (int)TableFileSchema::TO_INDEX:++to_index_count;
+                    case (int)SegmentSchema::TO_INDEX:++to_index_count;
                         break;
-                    case (int)TableFileSchema::INDEX:++index_count;
+                    case (int)SegmentSchema::INDEX:++index_count;
                         break;
-                    case (int)TableFileSchema::BACKUP:++backup_count;
+                    case (int)SegmentSchema::BACKUP:++backup_count;
                         break;
                     default:break;
                 }
@@ -1240,26 +1240,26 @@ SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
                 files.emplace_back(file_schema);
             }
 
-            std::string msg = "Get table files by type.";
+            std::string msg = "Get collection files by type.";
             for (int file_type : file_types) {
                 switch (file_type) {
-                    case (int)TableFileSchema::RAW:msg = msg + " raw files:" + std::to_string(raw_count);
+                    case (int)SegmentSchema::RAW:msg = msg + " raw files:" + std::to_string(raw_count);
                         break;
-                    case (int)TableFileSchema::NEW:msg = msg + " new files:" + std::to_string(new_count);
+                    case (int)SegmentSchema::NEW:msg = msg + " new files:" + std::to_string(new_count);
                         break;
-                    case (int)TableFileSchema::NEW_MERGE:
+                    case (int)SegmentSchema::NEW_MERGE:
                         msg = msg + " new_merge files:" + std::to_string(new_merge_count);
                         break;
-                    case (int)TableFileSchema::NEW_INDEX:
+                    case (int)SegmentSchema::NEW_INDEX:
                         msg = msg + " new_index files:" + std::to_string(new_index_count);
                         break;
-                    case (int)TableFileSchema::TO_INDEX:msg = msg + " to_index files:" + std::to_string(to_index_count);
+                    case (int)SegmentSchema::TO_INDEX:msg = msg + " to_index files:" + std::to_string(to_index_count);
                         break;
-                    case (int)TableFileSchema::INDEX:msg = msg + " index files:" + std::to_string(index_count);
+                    case (int)SegmentSchema::INDEX:msg = msg + " index files:" + std::to_string(index_count);
                         break;
-                    case (int)TableFileSchema::BACKUP:msg = msg + " backup files:" + std::to_string(backup_count);
+                    case (int)SegmentSchema::BACKUP:msg = msg + " backup files:" + std::to_string(backup_count);
                         break;
                     default:break;
                 }
@@ -1274,7 +1274,7 @@ SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
 }
 
 Status
-SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, TableFilesSchema& files) {
+SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files) {
     files.clear();
 
     if (ids.empty()) {
@@ -1286,27 +1286,27 @@ SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, TableFilesSchema& file
         fiu_do_on("SqliteMetaImpl.FilesByID.throw_exception", throw std::exception());
 
         auto select_columns =
-            columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::segment_id_,
-                    &TableFileSchema::file_id_, &TableFileSchema::file_type_, &TableFileSchema::file_size_,
-                    &TableFileSchema::row_count_, &TableFileSchema::date_, &TableFileSchema::engine_type_);
+            columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_,
+                    &SegmentSchema::file_id_, &SegmentSchema::file_type_, &SegmentSchema::file_size_,
+                    &SegmentSchema::row_count_, &SegmentSchema::date_, &SegmentSchema::engine_type_);
 
-        std::vector<int> file_types = {(int)TableFileSchema::RAW, (int)TableFileSchema::TO_INDEX,
-                                       (int)TableFileSchema::INDEX};
-        auto match_type = in(&TableFileSchema::file_type_, file_types);
+        std::vector<int> file_types = {(int)SegmentSchema::RAW, (int)SegmentSchema::TO_INDEX,
+                                       (int)SegmentSchema::INDEX};
+        auto match_type = in(&SegmentSchema::file_type_, file_types);
 
         // perform query
         decltype(ConnectorPtr->select(select_columns)) selected;
-        auto match_fileid = in(&TableFileSchema::id_, ids);
+        auto match_fileid = in(&SegmentSchema::id_, ids);
         auto filter = where(match_fileid and match_type);
         selected = ConnectorPtr->select(select_columns, filter);
 
-        std::map<std::string, TableSchema> tables;
+        std::map<std::string, CollectionSchema> tables;
         Status ret;
         for (auto& file : selected) {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             table_file.id_ = std::get<0>(file);
-            table_file.table_id_ = std::get<1>(file);
+            table_file.collection_id_ = std::get<1>(file);
             table_file.segment_id_ = std::get<2>(file);
             table_file.file_id_ = std::get<3>(file);
             table_file.file_type_ = std::get<4>(file);
@@ -1315,14 +1315,14 @@ SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, TableFilesSchema& file
             table_file.date_ = std::get<7>(file);
             table_file.engine_type_ = std::get<8>(file);
 
-            if (tables.find(table_file.table_id_) == tables.end()) {
-                TableSchema table_schema;
-                table_schema.table_id_ = table_file.table_id_;
+            if (tables.find(table_file.collection_id_) == tables.end()) {
+                CollectionSchema table_schema;
+                table_schema.collection_id_ = table_file.collection_id_;
                 auto status = DescribeTable(table_schema);
                 if (!status.ok()) {
                     return status;
                 }
-                tables.insert(std::make_pair(table_file.table_id_, table_schema));
+                tables.insert(std::make_pair(table_file.collection_id_, table_schema));
             }
 
             auto status = utils::GetTableFilePath(options_, table_file);
@@ -1334,7 +1334,7 @@ SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, TableFilesSchema& file
         }
 
         for (auto& table_file : files) {
-            TableSchema& table_schema = tables[table_file.table_id_];
+            CollectionSchema& table_schema = tables[table_file.collection_id_];
             table_file.dimension_ = table_schema.dimension_;
             table_file.index_file_size_ = table_schema.index_file_size_;
             table_file.index_params_ = table_schema.index_params_;
@@ -1374,11 +1374,11 @@ SqliteMetaImpl::Archive() {
             // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
             std::lock_guard<std::mutex> meta_lock(meta_mutex_);
 
-            ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE),
-                                     where(c(&TableFileSchema::created_on_) < (int64_t)(now - usecs) and
-                                           c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE));
+            ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::TO_DELETE),
+                                     where(c(&SegmentSchema::created_on_) < (int64_t)(now - usecs) and
+                                           c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE));
         } catch (std::exception& e) {
-            return HandleException("Encounter exception when update table files", e.what());
+            return HandleException("Encounter exception when update collection files", e.what());
         }
 
         ENGINE_LOG_DEBUG << "Archive old files";
@@ -1403,8 +1403,8 @@ SqliteMetaImpl::Size(uint64_t& result) {
     try {
         fiu_do_on("SqliteMetaImpl.Size.throw_exception", throw std::exception());
 
-        auto selected = ConnectorPtr->select(columns(sum(&TableFileSchema::file_size_)),
-                                             where(c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE));
+        auto selected = ConnectorPtr->select(columns(sum(&SegmentSchema::file_size_)),
+                                             where(c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE));
         for (auto& total_size : selected) {
             if (!std::get<0>(total_size)) {
                 continue;
@@ -1426,15 +1426,15 @@ SqliteMetaImpl::CleanUpShadowFiles() {
         // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
         std::lock_guard<std::mutex> meta_lock(meta_mutex_);
 
-        std::vector<int> file_types = {(int)TableFileSchema::NEW, (int)TableFileSchema::NEW_INDEX,
-                                       (int)TableFileSchema::NEW_MERGE};
+        std::vector<int> file_types = {(int)SegmentSchema::NEW, (int)SegmentSchema::NEW_INDEX,
+                                       (int)SegmentSchema::NEW_MERGE};
         auto files =
-            ConnectorPtr->select(columns(&TableFileSchema::id_), where(in(&TableFileSchema::file_type_, file_types)));
+            ConnectorPtr->select(columns(&SegmentSchema::id_), where(in(&SegmentSchema::file_type_, file_types)));
 
         auto commited = ConnectorPtr->transaction([&]() mutable {
             for (auto& file : files) {
-                ENGINE_LOG_DEBUG << "Remove table file type as NEW";
-                ConnectorPtr->remove(std::get<0>(file));
+                ENGINE_LOG_DEBUG << "Remove collection file type as NEW";
+                ConnectorPtr->remove(std::get<0>(file));
             }
             return true;
         });
@@ -1449,7 +1449,7 @@ SqliteMetaImpl::CleanUpShadowFiles() {
             ENGINE_LOG_DEBUG << "Clean " << files.size() << " files";
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when clean table file", e.what());
+        return HandleException("Encounter exception when clean collection file", e.what());
     }
 
     return Status::OK();
@@ -1459,7 +1459,7 @@ Status
 SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/) {
     auto now = utils::GetMicroSecTimeStamp();
     std::set<std::string> table_ids;
-    std::map<std::string, TableFileSchema> segment_ids;
+    std::map<std::string, SegmentSchema> segment_ids;
 
     // remove to_delete files
     try {
@@ -1468,8 +1468,8 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
         server::MetricCollector metric;
 
         std::vector<int> file_types = {
-            (int)TableFileSchema::TO_DELETE,
-            (int)TableFileSchema::BACKUP,
+            (int)SegmentSchema::TO_DELETE,
+            (int)SegmentSchema::BACKUP,
         };
 
         // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
@@ -1477,18 +1477,18 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
         // collect files to be deleted
         auto files = ConnectorPtr->select(
-            columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::segment_id_,
-                    &TableFileSchema::engine_type_, &TableFileSchema::file_id_, &TableFileSchema::file_type_,
-                    &TableFileSchema::date_),
-            where(in(&TableFileSchema::file_type_, file_types) and
-                  c(&TableFileSchema::updated_time_) < now - seconds * US_PS));
+            columns(&SegmentSchema::id_, &SegmentSchema::collection_id_, &SegmentSchema::segment_id_,
+                    &SegmentSchema::engine_type_, &SegmentSchema::file_id_, &SegmentSchema::file_type_,
+                    &SegmentSchema::date_),
+            where(in(&SegmentSchema::file_type_, file_types) and
+                  c(&SegmentSchema::updated_time_) < now - seconds * US_PS));
 
         int64_t clean_files = 0;
         auto commited = ConnectorPtr->transaction([&]() mutable {
-            TableFileSchema table_file;
+            SegmentSchema table_file;
             for (auto& file : files) {
                 table_file.id_ = std::get<0>(file);
-                table_file.table_id_ = std::get<1>(file);
+                table_file.collection_id_ = std::get<1>(file);
                 table_file.segment_id_ = std::get<2>(file);
                 table_file.engine_type_ = std::get<3>(file);
                 table_file.file_id_ = std::get<4>(file);
@@ -1508,16 +1508,16 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
                 utils::GetTableFilePath(options_, table_file);
                 server::CommonUtil::EraseFromCache(table_file.location_);
 
-                if (table_file.file_type_ == (int)TableFileSchema::TO_DELETE) {
+                if (table_file.file_type_ == (int)SegmentSchema::TO_DELETE) {
                     // delete file from meta
-                    ConnectorPtr->remove(table_file.id_);
+                    ConnectorPtr->remove(table_file.id_);
 
                     // delete file from disk storage
                     utils::DeleteTableFilePath(options_, table_file);
 
                     ENGINE_LOG_DEBUG << "Remove file id:" << table_file.file_id_ << " location:" << table_file.location_;
 
-                    table_ids.insert(table_file.table_id_);
+                    table_ids.insert(table_file.collection_id_);
                     segment_ids.insert(std::make_pair(table_file.segment_id_, table_file));
 
                     ++clean_files;
@@ -1535,7 +1535,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
             ENGINE_LOG_DEBUG << "Clean " << clean_files << " files expired in " << seconds << " seconds";
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when clean table files", e.what());
+        return HandleException("Encounter exception when clean collection files", e.what());
     }
 
     // remove to_delete tables
@@ -1546,13 +1546,13 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
         // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
         std::lock_guard<std::mutex> meta_lock(meta_mutex_);
 
-        auto tables = ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::table_id_),
-                                           where(c(&TableSchema::state_) == (int)TableSchema::TO_DELETE));
+        auto tables = ConnectorPtr->select(columns(&CollectionSchema::id_, &CollectionSchema::collection_id_),
+                                           where(c(&CollectionSchema::state_) == (int)CollectionSchema::TO_DELETE));
 
         auto commited = ConnectorPtr->transaction([&]() mutable {
-            for (auto& table : tables) {
-                utils::DeleteTablePath(options_, std::get<1>(table), false);  // only delete empty folder
-                ConnectorPtr->remove(std::get<0>(table));
+            for (auto& collection : tables) {
+                utils::DeleteTablePath(options_, std::get<1>(collection), false);  // only delete empty folder
+                ConnectorPtr->remove(std::get<0>(collection));
             }
 
             return true;
@@ -1567,21 +1567,21 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
             ENGINE_LOG_DEBUG << "Remove " << tables.size() << " tables from meta";
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when clean table files", e.what());
+        return HandleException("Encounter exception when clean collection files", e.what());
     }
 
-    // remove deleted table folder
-    // don't remove table folder until all its files has been deleted
+    // remove deleted collection folder
+    // don't remove collection folder until all its files has been deleted
     try {
         fiu_do_on("SqliteMetaImpl.CleanUpFilesWithTTL.RemoveTableFolder_ThrowException", throw std::exception());
         server::MetricCollector metric;
 
         int64_t remove_tables = 0;
-        for (auto& table_id : table_ids) {
-            auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_),
-                                                 where(c(&TableFileSchema::table_id_) == table_id));
+        for (auto& collection_id : table_ids) {
+            auto selected = ConnectorPtr->select(columns(&SegmentSchema::file_id_),
+                                                 where(c(&SegmentSchema::collection_id_) == collection_id));
             if (selected.size() == 0) {
-                utils::DeleteTablePath(options_, table_id);
+                utils::DeleteTablePath(options_, collection_id);
                 ++remove_tables;
             }
         }
@@ -1590,7 +1590,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
             ENGINE_LOG_DEBUG << "Remove " << remove_tables << " tables folder";
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when delete table folder", e.what());
+        return HandleException("Encounter exception when delete collection folder", e.what());
     }
 
     // remove deleted segment folder
@@ -1601,8 +1601,8 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
         int64_t remove_segments = 0;
         for (auto& segment_id : segment_ids) {
-            auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_),
-                                                 where(c(&TableFileSchema::segment_id_) == segment_id.first));
+            auto selected = ConnectorPtr->select(columns(&SegmentSchema::id_),
+                                                 where(c(&SegmentSchema::segment_id_) == segment_id.first));
             if (selected.size() == 0) {
                 utils::DeleteSegment(options_, segment_id.second);
                 std::string segment_dir;
@@ -1616,27 +1616,27 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
             ENGINE_LOG_DEBUG << "Remove " << remove_segments << " segments folder";
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when delete table folder", e.what());
+        return HandleException("Encounter exception when delete collection folder", e.what());
     }
 
     return Status::OK();
 }
 
 Status
-SqliteMetaImpl::Count(const std::string& table_id, uint64_t& result) {
+SqliteMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
     try {
         fiu_do_on("SqliteMetaImpl.Count.throw_exception", throw std::exception());
 
         server::MetricCollector metric;
 
-        std::vector<int> file_types = {(int)TableFileSchema::RAW, (int)TableFileSchema::TO_INDEX,
-                                       (int)TableFileSchema::INDEX};
+        std::vector<int> file_types = {(int)SegmentSchema::RAW, (int)SegmentSchema::TO_INDEX,
+                                       (int)SegmentSchema::INDEX};
         auto selected = ConnectorPtr->select(
-            columns(&TableFileSchema::row_count_),
-            where(in(&TableFileSchema::file_type_, file_types) and c(&TableFileSchema::table_id_) == table_id));
+            columns(&SegmentSchema::row_count_),
+            where(in(&SegmentSchema::file_type_, file_types) and c(&SegmentSchema::collection_id_) == collection_id));
 
-        TableSchema table_schema;
-        table_schema.table_id_ = table_id;
+        CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_id;
         auto status = DescribeTable(table_schema);
 
         if (!status.ok()) {
@@ -1648,7 +1648,7 @@ SqliteMetaImpl::Count(const std::string& table_id, uint64_t& result) {
             result += std::get<0>(file);
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when calculate table file size", e.what());
+        return HandleException("Encounter exception when calculate collection file size", e.what());
     }
     return Status::OK();
 }
@@ -1685,12 +1685,12 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
         auto commited = ConnectorPtr->transaction([&]() mutable {
             auto selected =
-                ConnectorPtr->select(columns(&TableFileSchema::id_, &TableFileSchema::file_size_),
-                                     where(c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE),
-                                     order_by(&TableFileSchema::id_), limit(10));
+                ConnectorPtr->select(columns(&SegmentSchema::id_, &SegmentSchema::file_size_),
+                                     where(c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE),
+                                     order_by(&SegmentSchema::id_), limit(10));
 
             std::vector<int> ids;
-            TableFileSchema table_file;
+            SegmentSchema table_file;
 
             for (auto& file : selected) {
                 if (to_discard_size <= 0)
@@ -1707,9 +1707,9 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
                 return true;
             }
 
-            ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE,
-                                         c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()),
-                                     where(in(&TableFileSchema::id_, ids)));
+            ConnectorPtr->update_all(set(c(&SegmentSchema::file_type_) = (int)SegmentSchema::TO_DELETE,
+                                         c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()),
+                                     where(in(&SegmentSchema::id_, ids)));
 
             return true;
         });
@@ -1718,7 +1718,7 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
             return HandleException("DiscardFiles error: sqlite transaction failed");
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when discard table file", e.what());
+        return HandleException("Encounter exception when discard collection file", e.what());
     }
 
     return DiscardFiles(to_discard_size);
@@ -1764,7 +1764,7 @@ SqliteMetaImpl::GetGlobalLastLSN(uint64_t& lsn) {
             lsn = std::get<0>(selected[0]);
         }
     } catch (std::exception& e) {
-        return HandleException("Encounter exception when delete table folder", e.what());
+        return HandleException("Encounter exception when delete collection folder", e.what());
     }
 
     return Status::OK();
diff --git a/core/src/db/meta/SqliteMetaImpl.h b/core/src/db/meta/SqliteMetaImpl.h
index cdae712940..948b17fea0 100644
--- a/core/src/db/meta/SqliteMetaImpl.h
+++ b/core/src/db/meta/SqliteMetaImpl.h
@@ -31,92 +31,94 @@ class SqliteMetaImpl : public Meta {
     ~SqliteMetaImpl();
 
     Status
-    CreateTable(TableSchema& table_schema) override;
+    CreateTable(CollectionSchema& table_schema) override;
 
     Status
-    DescribeTable(TableSchema& table_schema) override;
+    DescribeTable(CollectionSchema& table_schema) override;
 
     Status
-    HasTable(const std::string& table_id, bool& has_or_not) override;
+    HasTable(const std::string& collection_id, bool& has_or_not) override;
 
     Status
-    AllTables(std::vector<TableSchema>& table_schema_array) override;
+    AllTables(std::vector<CollectionSchema>& table_schema_array) override;
 
     Status
-    DropTable(const std::string& table_id) override;
+    DropTable(const std::string& collection_id) override;
 
     Status
-    DeleteTableFiles(const std::string& table_id) override;
+    DeleteTableFiles(const std::string& collection_id) override;
 
     Status
-    CreateTableFile(TableFileSchema& file_schema) override;
+    CreateTableFile(SegmentSchema& file_schema) override;
 
     Status
-    GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
+    GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids,
+                  SegmentsSchema& table_files) override;
 
     Status
-    GetTableFilesBySegmentId(const std::string& segment_id, TableFilesSchema& table_files) override;
+    GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
 
     Status
-    UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;
+    UpdateTableIndex(const std::string& collection_id, const TableIndex& index) override;
 
     Status
-    UpdateTableFlag(const std::string& table_id, int64_t flag) override;
+    UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
 
     Status
-    UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) override;
+    UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) override;
 
     Status
-    GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) override;
+    GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
 
     Status
-    GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) override;
+    GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) override;
 
     Status
-    UpdateTableFile(TableFileSchema& file_schema) override;
+    UpdateTableFile(SegmentSchema& file_schema) override;
 
     Status
-    UpdateTableFilesToIndex(const std::string& table_id) override;
+    UpdateTableFilesToIndex(const std::string& collection_id) override;
 
     Status
-    UpdateTableFiles(TableFilesSchema& files) override;
+    UpdateTableFiles(SegmentsSchema& files) override;
 
     Status
-    UpdateTableFilesRowCount(TableFilesSchema& files) override;
+    UpdateTableFilesRowCount(SegmentsSchema& files) override;
 
     Status
-    DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
+    DescribeTableIndex(const std::string& collection_id, TableIndex& index) override;
 
     Status
-    DropTableIndex(const std::string& table_id) override;
+    DropTableIndex(const std::string& collection_id) override;
 
     Status
-    CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag,
+    CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag,
                     uint64_t lsn) override;
 
     Status
     DropPartition(const std::string& partition_name) override;
 
     Status
-    ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;
+    ShowPartitions(const std::string& collection_id,
+                   std::vector<meta::CollectionSchema>& partition_schema_array) override;
 
     Status
-    GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;
+    GetPartitionName(const std::string& collection_id, const std::string& tag, std::string& partition_name) override;
 
     Status
-    FilesToSearch(const std::string& table_id, TableFilesSchema& files) override;
+    FilesToSearch(const std::string& collection_id, SegmentsSchema& files) override;
 
     Status
-    FilesToMerge(const std::string& table_id, TableFilesSchema& files) override;
+    FilesToMerge(const std::string& collection_id, SegmentsSchema& files) override;
 
     Status
-    FilesToIndex(TableFilesSchema&) override;
+    FilesToIndex(SegmentsSchema&) override;
 
     Status
-    FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) override;
+    FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) override;
 
     Status
-    FilesByID(const std::vector<size_t>& ids, TableFilesSchema& files) override;
+    FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files) override;
 
     Status
     Size(uint64_t& result) override;
@@ -134,7 +136,7 @@ class SqliteMetaImpl : public Meta {
     DropAll() override;
 
     Status
-    Count(const std::string& table_id, uint64_t& result) override;
+    Count(const std::string& collection_id, uint64_t& result) override;
 
     Status
     SetGlobalLastLSN(uint64_t lsn) override;
@@ -146,7 +148,7 @@ class SqliteMetaImpl : public Meta {
     Status
     NextFileId(std::string& file_id);
     Status
-    NextTableId(std::string& table_id);
+    NextTableId(std::string& collection_id);
     Status
     DiscardFiles(int64_t to_discard_size);
diff --git a/core/src/db/wal/WalBuffer.cpp b/core/src/db/wal/WalBuffer.cpp
index 48943691ce..c0a092a9cf 100644
--- a/core/src/db/wal/WalBuffer.cpp
+++ b/core/src/db/wal/WalBuffer.cpp
@@ -185,7 +185,7 @@ MXLogBuffer::SurplusSpace() {
 
 uint32_t
 MXLogBuffer::RecordSize(const MXLogRecord& record) {
-    return SizeOfMXLogRecordHeader + (uint32_t)record.table_id.size() + (uint32_t)record.partition_tag.size() +
+    return SizeOfMXLogRecordHeader + (uint32_t)record.collection_id.size() + (uint32_t)record.partition_tag.size() +
            record.length * (uint32_t)sizeof(IDNumber) + record.data_size;
 }
 
@@ -218,7 +218,7 @@ MXLogBuffer::Append(MXLogRecord& record) {
     MXLogRecordHeader head;
     BuildLsn(mxlog_buffer_writer_.file_no, mxlog_buffer_writer_.buf_offset + (uint32_t)record_size, head.mxl_lsn);
     head.mxl_type = (uint8_t)record.type;
-    head.table_id_size = (uint16_t)record.table_id.size();
+    head.table_id_size = (uint16_t)record.collection_id.size();
     head.partition_tag_size = (uint16_t)record.partition_tag.size();
     head.vector_num = record.length;
     head.data_size = record.data_size;
@@ -226,9 +226,9 @@ MXLogBuffer::Append(MXLogRecord& record) {
     memcpy(current_write_buf + current_write_offset, &head, SizeOfMXLogRecordHeader);
     current_write_offset += SizeOfMXLogRecordHeader;
 
-    if (!record.table_id.empty()) {
-        memcpy(current_write_buf + current_write_offset, record.table_id.data(), record.table_id.size());
-        current_write_offset += record.table_id.size();
+    if (!record.collection_id.empty()) {
+        memcpy(current_write_buf + current_write_offset, record.collection_id.data(), record.collection_id.size());
+        current_write_offset += record.collection_id.size();
     }
 
     if (!record.partition_tag.empty()) {
@@ -307,10 +307,10 @@ MXLogBuffer::Next(const uint64_t last_applied_lsn, MXLogRecord& record) {
     current_read_offset += SizeOfMXLogRecordHeader;
 
     if (head->table_id_size != 0) {
-        record.table_id.assign(current_read_buf + current_read_offset, head->table_id_size);
+        record.collection_id.assign(current_read_buf + current_read_offset, head->table_id_size);
         current_read_offset += head->table_id_size;
     } else {
-        record.table_id = "";
+        record.collection_id = "";
     }
 
     if (head->partition_tag_size != 0) {
diff --git a/core/src/db/wal/WalDefinations.h b/core/src/db/wal/WalDefinations.h
index b6c19435a1..d4f02daee2 100644
--- a/core/src/db/wal/WalDefinations.h
+++ b/core/src/db/wal/WalDefinations.h
@@ -22,7 +22,7 @@ namespace milvus {
 namespace engine {
 namespace wal {
 
-using TableSchemaPtr = std::shared_ptr<meta::TableSchema>;
+using TableSchemaPtr = std::shared_ptr<meta::CollectionSchema>;
 using TableMetaPtr = std::shared_ptr<std::unordered_map<std::string, TableSchemaPtr>>;
 
 #define UNIT_MB (1024 * 1024)
@@ -33,7 +33,7 @@ enum class MXLogType { InsertBinary, InsertVector, Delete, Update, Flush, None };
 struct MXLogRecord {
     uint64_t lsn;
     MXLogType type;
-    std::string table_id;
+    std::string collection_id;
     std::string partition_tag;
     uint32_t length;
     const IDNumber* ids;
diff --git a/core/src/db/wal/WalManager.cpp b/core/src/db/wal/WalManager.cpp
index aed2bd99fc..c4a9435c14 100644
--- a/core/src/db/wal/WalManager.cpp
+++ b/core/src/db/wal/WalManager.cpp
@@ -61,7 +61,7 @@ WalManager::Init(const meta::MetaPtr& meta) {
         if (meta != nullptr) {
             meta->GetGlobalLastLSN(recovery_start);
 
-            std::vector<meta::TableSchema> table_schema_array;
+            std::vector<meta::CollectionSchema> table_schema_array;
             auto status = meta->AllTables(table_schema_array);
             if (!status.ok()) {
                 return WAL_META_ERROR;
@@ -89,7 +89,7 @@ WalManager::Init(const meta::MetaPtr& meta) {
 
             for (auto& schema : table_schema_array) {
                 TableLsn tb_lsn = {schema.flush_lsn_, applied_lsn};
-                tables_[schema.table_id_] = tb_lsn;
+                tables_[schema.collection_id_] = tb_lsn;
             }
         }
     }
@@ -140,7 +140,7 @@ WalManager::GetNextRecovery(MXLogRecord& record) {
         // background thread has not started.
         // so, needn't lock here.
-        auto it = tables_.find(record.table_id);
+        auto it = tables_.find(record.collection_id);
         if (it != tables_.end()) {
             if (it->second.flush_lsn < record.lsn) {
                 break;
@@ -162,11 +162,11 @@ WalManager::GetNextRecord(MXLogRecord& record) {
             if (p_buffer_->GetReadLsn() >= flush_info_.lsn_) {
                 // can exec flush requirement
                 record.type = MXLogType::Flush;
-                record.table_id = flush_info_.table_id_;
+                record.collection_id = flush_info_.collection_id_;
                 record.lsn = flush_info_.lsn_;
                 flush_info_.Clear();
 
-                WAL_LOG_INFO << "record flush table " << record.table_id << " lsn " << record.lsn;
+                WAL_LOG_INFO << "record flush collection " << record.collection_id << " lsn " << record.lsn;
                 return true;
             }
         }
@@ -187,7 +187,7 @@ WalManager::GetNextRecord(MXLogRecord& record) {
         }
 
         std::lock_guard<std::mutex> lck(mutex_);
-        auto it = tables_.find(record.table_id);
+        auto it = tables_.find(record.collection_id);
         if (it != tables_.end()) {
             if (it->second.flush_lsn < record.lsn) {
                 break;
@@ -195,41 +195,42 @@ WalManager::GetNextRecord(MXLogRecord& record) {
         }
     }
 
-    WAL_LOG_INFO << "record type " << (int32_t)record.type << " table " << record.table_id << " lsn " << record.lsn;
+    WAL_LOG_INFO << "record type " << (int32_t)record.type << " collection " << record.collection_id << " lsn "
+                 << record.lsn;
     return error_code;
 }
 
 uint64_t
-WalManager::CreateTable(const std::string& table_id) {
-    WAL_LOG_INFO << "create table " << table_id << " " << last_applied_lsn_;
+WalManager::CreateTable(const std::string& collection_id) {
+    WAL_LOG_INFO << "create collection " << collection_id << " " << last_applied_lsn_;
     std::lock_guard<std::mutex> lck(mutex_);
     uint64_t applied_lsn = last_applied_lsn_;
-    tables_[table_id] = {applied_lsn, applied_lsn};
+    tables_[collection_id] = {applied_lsn, applied_lsn};
     return applied_lsn;
 }
 
 void
-WalManager::DropTable(const std::string& table_id) {
-    WAL_LOG_INFO << "drop table " << table_id;
+WalManager::DropTable(const std::string& collection_id) {
+    WAL_LOG_INFO << "drop collection " << collection_id;
     std::lock_guard<std::mutex> lck(mutex_);
-    tables_.erase(table_id);
+    tables_.erase(collection_id);
 }
 
 void
-WalManager::TableFlushed(const std::string& table_id, uint64_t lsn) {
+WalManager::TableFlushed(const std::string& collection_id, uint64_t lsn) {
     std::unique_lock<std::mutex> lck(mutex_);
-    auto it = tables_.find(table_id);
+    auto it = tables_.find(collection_id);
     if (it != tables_.end()) {
         it->second.flush_lsn = lsn;
     }
     lck.unlock();
 
-    WAL_LOG_INFO << table_id << " is flushed by lsn " << lsn;
+    WAL_LOG_INFO << collection_id << " is flushed by lsn " << lsn;
 }
 
 template <typename T>
 bool
-WalManager::Insert(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
+WalManager::Insert(const std::string& collection_id, const std::string& partition_tag, const IDNumbers& vector_ids,
                    const std::vector<T>& vectors) {
     MXLogType log_type;
     if (std::is_same<T, float>::value) {
@@ -247,11 +248,11 @@ WalManager::Insert(const std::string& table_id, const std::string& partition_tag
     }
     size_t dim = vectors.size() / vector_num;
     size_t unit_size = dim * sizeof(T) + sizeof(IDNumber);
-    size_t head_size = SizeOfMXLogRecordHeader + table_id.length() + partition_tag.length();
+    size_t head_size = SizeOfMXLogRecordHeader + collection_id.length() + partition_tag.length();
 
     MXLogRecord record;
     record.type = log_type;
-    record.table_id = table_id;
+    record.collection_id = collection_id;
     record.partition_tag = partition_tag;
 
     uint64_t new_lsn = 0;
@@ -283,19 +284,19 @@ WalManager::Insert(const std::string& table_id, const std::string& partition_tag
     std::unique_lock<std::mutex> lck(mutex_);
     last_applied_lsn_ = new_lsn;
-    auto it = tables_.find(table_id);
+    auto it = tables_.find(collection_id);
     if (it != tables_.end()) {
         it->second.wal_lsn = new_lsn;
     }
     lck.unlock();
 
-    WAL_LOG_INFO << table_id << " insert in part " << partition_tag << " with lsn " << new_lsn;
+    WAL_LOG_INFO << collection_id << " insert in part " << partition_tag << " with lsn " << new_lsn;
 
     return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
 }
 
 bool
-WalManager::DeleteById(const std::string& table_id, const IDNumbers& vector_ids) {
+WalManager::DeleteById(const std::string& collection_id, const IDNumbers& vector_ids) {
     size_t vector_num = vector_ids.size();
     if (vector_num == 0) {
         WAL_LOG_ERROR << "The ids is empty.";
@@ -303,11 +304,11 @@ WalManager::DeleteById(const std::string& table_id, const IDNumbers& vector_ids)
     }
 
     size_t unit_size = sizeof(IDNumber);
-    size_t head_size = SizeOfMXLogRecordHeader + table_id.length();
+    size_t head_size = SizeOfMXLogRecordHeader + collection_id.length();
 
     MXLogRecord record;
     record.type = MXLogType::Delete;
-    record.table_id = table_id;
+    record.collection_id = collection_id;
     record.partition_tag = "";
 
     uint64_t new_lsn = 0;
@@ -335,26 +336,26 @@ WalManager::DeleteById(const std::string& table_id, const IDNumbers& vector_ids)
     std::unique_lock<std::mutex> lck(mutex_);
     last_applied_lsn_ = new_lsn;
-    auto it = tables_.find(table_id);
+    auto it = tables_.find(collection_id);
     if (it != tables_.end()) {
         it->second.wal_lsn = new_lsn;
     }
     lck.unlock();
 
-    WAL_LOG_INFO << table_id << " delete rows by id, lsn " << new_lsn;
+    WAL_LOG_INFO << collection_id << " delete rows by id, lsn " << new_lsn;
 
     return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
 }
 
 uint64_t
-WalManager::Flush(const std::string& table_id) {
+WalManager::Flush(const std::string& collection_id) {
     std::lock_guard<std::mutex> lck(mutex_);
     // At most one flush requirement is waiting at any time.
     // Otherwise, flush_info_ should be modified to a list.
     __glibcxx_assert(!flush_info_.IsValid());
 
     uint64_t lsn = 0;
-    if (table_id.empty()) {
+    if (collection_id.empty()) {
         // flush all tables
         for (auto& it : tables_) {
             if (it.second.wal_lsn > it.second.flush_lsn) {
@@ -364,8 +365,8 @@ WalManager::Flush(const std::string& table_id) {
             }
         }
     } else {
-        // flush one table
-        auto it = tables_.find(table_id);
+        // flush one collection
+        auto it = tables_.find(collection_id);
         if (it != tables_.end()) {
             if (it->second.wal_lsn > it->second.flush_lsn) {
                 lsn = it->second.wal_lsn;
@@ -374,11 +375,11 @@ WalManager::Flush(const std::string& table_id) {
     }
 
     if (lsn != 0) {
-        flush_info_.table_id_ = table_id;
+        flush_info_.collection_id_ = collection_id;
         flush_info_.lsn_ = lsn;
     }
 
-    WAL_LOG_INFO << table_id << " want to be flush, lsn " << lsn;
+    WAL_LOG_INFO << collection_id << " want to be flush, lsn " << lsn;
 
     return lsn;
 }
@@ -391,12 +392,12 @@ WalManager::RemoveOldFiles(uint64_t flushed_lsn) {
 }
 
 template bool
-WalManager::Insert<float>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
-                          const std::vector<float>& vectors);
+WalManager::Insert<float>(const std::string& collection_id, const std::string& partition_tag,
+                          const IDNumbers& vector_ids, const std::vector<float>& vectors);
 
 template bool
-WalManager::Insert<uint8_t>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
-                            const std::vector<uint8_t>& vectors);
+WalManager::Insert<uint8_t>(const std::string& collection_id, const std::string& partition_tag,
+                            const IDNumbers& vector_ids, const std::vector<uint8_t>& vectors);
 
 }  // namespace wal
 }  // namespace engine
diff --git a/core/src/db/wal/WalManager.h b/core/src/db/wal/WalManager.h
index 67bcf4cc72..b1572f1403 100644
--- a/core/src/db/wal/WalManager.h
+++ b/core/src/db/wal/WalManager.h
@@ -57,57 +57,57 @@ class WalManager {
     GetNextRecord(MXLogRecord& record);
 
     /*
-     * Create table
-     * @param table_id: table id
+     * Create collection
+     * @param collection_id: collection id
     * @retval lsn
     */
     uint64_t
-    CreateTable(const std::string& table_id);
+    CreateTable(const std::string& collection_id);
 
     /*
-     * Drop table
-     * @param table_id: table id
+     * Drop collection
+     * @param collection_id: collection id
     * @retval none
     */
     void
-    DropTable(const std::string& table_id);
+    DropTable(const std::string& collection_id);
 
     /*
-     * Table is flushed
-     * @param table_id: table id
+     * Collection is flushed
+     * @param collection_id: collection id
     * @param lsn: flushed lsn
     */
     void
-    TableFlushed(const std::string& table_id, uint64_t lsn);
+    TableFlushed(const std::string& collection_id, uint64_t lsn);
 
     /*
     * Insert
-     * @param table_id: table id
-     * @param table_id: partition tag
+     * @param collection_id: collection id
+     * @param partition_tag: partition tag
     * @param vector_ids: vector ids
     * @param vectors: vectors
     */
     template <typename T>
     bool
-    Insert(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
+    Insert(const std::string& collection_id, const std::string& partition_tag, const IDNumbers& vector_ids,
            const std::vector<T>& vectors);
 
     /*
     * Insert
-     * @param table_id: table id
+     * @param collection_id: collection id
     * @param vector_ids: vector ids
     */
     bool
-    DeleteById(const std::string& table_id, const IDNumbers& vector_ids);
+    DeleteById(const std::string& collection_id, const IDNumbers& vector_ids);
 
     /*
     * Get flush lsn
-     * @param table_id: table id (empty means all tables)
+     * @param collection_id: collection id (empty means all tables)
     * @retval if there is something not flushed, return lsn;
     *         else, return 0
     */
     uint64_t
-    Flush(const std::string& table_id = "");
+    Flush(const std::string& collection_id = "");
 
     void
     RemoveOldFiles(uint64_t flushed_lsn);
@@ -131,7 +131,7 @@ class WalManager {
     // if multi-thread call Flush(), use list
     struct FlushInfo {
-        std::string table_id_;
+        std::string collection_id_;
         uint64_t lsn_ = 0;
 
         bool
@@ -147,12 +147,12 @@ class WalManager {
 };
 
 extern template bool
-WalManager::Insert<float>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
-                          const std::vector<float>& vectors);
+WalManager::Insert<float>(const std::string& collection_id, const std::string& partition_tag,
+                          const IDNumbers& vector_ids, const std::vector<float>& vectors);
 
 extern template bool
-WalManager::Insert<uint8_t>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
-                            const std::vector<uint8_t>& vectors);
+WalManager::Insert<uint8_t>(const std::string& collection_id, const std::string& partition_tag,
+                            const IDNumbers& vector_ids, const std::vector<uint8_t>& vectors);
 
 }  // namespace wal
 }  // namespace engine
diff --git a/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.cpp b/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.cpp
index 2de15290e2..f2803246ba 100644
--- a/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.cpp
+++ b/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.cpp
@@ -23,7 +23,7 @@ AdapterMgr::GetAdapter(const IndexType type) {
         RegisterAdapter();
 
     try {
-        return table_.at(type)();
+        return collection_.at(type)();
     } catch (...) {
         KNOWHERE_THROW_MSG("Can not find this type of confadapter");
     }
diff --git a/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.h b/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.h
index 15ca5c49cc..5d8c24f322 100644
--- a/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.h
+++ b/core/src/index/knowhere/knowhere/index/vector_index/ConfAdapterMgr.h
@@ -26,7 +26,7 @@ class AdapterMgr {
     template <typename T>
     struct register_t {
         explicit register_t(const IndexType type) {
-            AdapterMgr::GetInstance().table_[type] = ([] { return std::make_shared<T>(); });
+            AdapterMgr::GetInstance().collection_[type] = ([] { return std::make_shared<T>(); });
         }
     };
 
@@ -44,7 +44,7 @@ class AdapterMgr {
 protected:
     bool init_ = false;
-    std::unordered_map<IndexType, std::function<ConfAdapterPtr()>> table_;
+    std::unordered_map<IndexType, std::function<ConfAdapterPtr()>> collection_;
 };
 
 }  // namespace knowhere
diff --git a/core/src/metrics/Metrics.h b/core/src/metrics/Metrics.h
index 168ac9f722..42f5ae4ba2 100644
--- a/core/src/metrics/Metrics.h
+++ b/core/src/metrics/Metrics.h
@@ -186,11 +186,11 @@ class CollectDurationMetrics : CollectMetricsBase {
     ~CollectDurationMetrics() {
         auto total_time = TimeFromBegine();
         switch (index_type_) {
-            case engine::meta::TableFileSchema::RAW: {
+            case engine::meta::SegmentSchema::RAW: {
                 server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
                 break;
             }
-            case engine::meta::TableFileSchema::TO_INDEX: {
+            case engine::meta::SegmentSchema::TO_INDEX: {
                 server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
                 break;
             }
@@ -214,11 +214,11 @@ class CollectSearchTaskMetrics : CollectMetricsBase {
     ~CollectSearchTaskMetrics() {
         auto total_time = TimeFromBegine();
         switch (index_type_) {
-            case engine::meta::TableFileSchema::RAW: {
+            case engine::meta::SegmentSchema::RAW: {
                 server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
                 break;
             }
-            case engine::meta::TableFileSchema::TO_INDEX: {
+            case engine::meta::SegmentSchema::TO_INDEX: {
                 server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
                 break;
             }
diff --git a/core/src/metrics/prometheus/PrometheusMetrics.h b/core/src/metrics/prometheus/PrometheusMetrics.h
index 59c7f4261d..e44c7a5431 100644
--- a/core/src/metrics/prometheus/PrometheusMetrics.h
+++ b/core/src/metrics/prometheus/PrometheusMetrics.h
@@ -440,7 +440,7 @@ class PrometheusMetrics : public MetricsBase {
     prometheus::Histogram& all_build_index_duration_seconds_histogram_ =
         all_build_index_duration_seconds_.Add({}, BucketBoundaries{2e6, 4e6, 6e6, 8e6, 1e7});
 
-    // record duration of merging mem table
+    // record duration of merging mem collection
     prometheus::Family<prometheus::Histogram>& mem_table_merge_duration_seconds_ =
         prometheus::BuildHistogram()
             .Name("mem_table_merge_duration_microseconds")
diff --git a/core/src/scheduler/Definition.h b/core/src/scheduler/Definition.h
index a8e34d40e6..6c32d6e0c1 100644
--- a/core/src/scheduler/Definition.h
+++ b/core/src/scheduler/Definition.h
@@ -27,8 +27,8 @@ namespace milvus {
 namespace scheduler {
 
-using TableFileSchemaPtr = engine::meta::TableFileSchemaPtr;
-using TableFileSchema = engine::meta::TableFileSchema;
+using SegmentSchemaPtr = engine::meta::SegmentSchemaPtr;
+using SegmentSchema = engine::meta::SegmentSchema;
 
 using ExecutionEnginePtr = engine::ExecutionEnginePtr;
 using EngineFactory = engine::EngineFactory;
diff --git a/core/src/scheduler/TaskTable.cpp b/core/src/scheduler/TaskTable.cpp
index 142832a5ea..edd2f6aa1a 100644
--- a/core/src/scheduler/TaskTable.cpp
+++ b/core/src/scheduler/TaskTable.cpp
@@ -153,22 +153,22 @@ TaskTable::PickToLoad(uint64_t limit) {
     std::vector<uint64_t> indexes;
     bool cross = false;
-    uint64_t available_begin = table_.front() + 1;
-    for (uint64_t i = 0, loaded_count = 0, pick_count = 0; i < table_.size() && pick_count < limit; ++i) {
+    uint64_t available_begin = collection_.front() + 1;
+    for (uint64_t i = 0, loaded_count = 0, pick_count = 0; i < collection_.size() && pick_count < limit; ++i) {
         auto index = available_begin + i;
-        if (not table_[index])
+        if (not collection_[index])
             break;
-        if (index % table_.capacity() == table_.rear())
+        if (index % collection_.capacity() == collection_.rear())
             break;
-        if (not cross && table_[index]->IsFinish()) {
-            table_.set_front(index);
-        } else if (table_[index]->state == TaskTableItemState::LOADED) {
+        if (not cross && collection_[index]->IsFinish()) {
+            collection_.set_front(index);
+        } else if (collection_[index]->state == TaskTableItemState::LOADED) {
             cross = true;
             ++loaded_count;
             if (loaded_count > 2)
                 return std::vector<uint64_t>();
-        } else if (table_[index]->state == TaskTableItemState::START) {
-            auto task = table_[index]->task;
+        } else if (collection_[index]->state == TaskTableItemState::START) {
+            auto task = collection_[index]->task;
 
             // if task is a build index task, limit it
             if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
@@ -186,18 +186,19 @@ TaskTable::PickToLoad(uint64_t limit) {
     return indexes;
 #else
     size_t count = 0;
-    for (uint64_t j = last_finish_ + 1; j < table_.size(); ++j) {
-        if (not table_[j]) {
-            SERVER_LOG_WARNING << "table[" << j << "] is nullptr";
+    for (uint64_t j = last_finish_ + 1; j < collection_.size(); ++j) {
+        if (not collection_[j]) {
+            SERVER_LOG_WARNING << "collection[" << j << "] is nullptr";
         }
 
-        if (table_[j]->task->path().Current() == "cpu") {
-            if (table_[j]->task->Type() == TaskType::BuildIndexTask && BuildMgrInst::GetInstance()->numoftasks() < 1) {
+        if (collection_[j]->task->path().Current() == "cpu") {
+            if (collection_[j]->task->Type() == TaskType::BuildIndexTask &&
+                BuildMgrInst::GetInstance()->numoftasks() < 1) {
                 return std::vector<uint64_t>();
             }
         }
 
-        if (table_[j]->state == TaskTableItemState::LOADED) {
+        if (collection_[j]->state == TaskTableItemState::LOADED) {
             ++count;
             if (count > 2)
                 return std::vector<uint64_t>();
@@ -206,11 +207,11 @@ TaskTable::PickToLoad(uint64_t limit) {
 
     std::vector<uint64_t> indexes;
     bool cross = false;
-    for (uint64_t i = last_finish_ + 1, count = 0; i < table_.size() && count < limit; ++i) {
-        if (not cross && table_[i]->IsFinish()) {
+    for (uint64_t i = last_finish_ + 1, count = 0; i < collection_.size() && count < limit; ++i) {
+        if (not cross && collection_[i]->IsFinish()) {
             last_finish_ = i;
-        } else if (table_[i]->state == TaskTableItemState::START) {
-            auto task = table_[i]->task;
+        } else if (collection_[i]->state == TaskTableItemState::START) {
+            auto task = collection_[i]->task;
             if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
                 if (BuildMgrInst::GetInstance()->numoftasks() == 0) {
                     break;
@@ -236,19 +237,19 @@ TaskTable::PickToExecute(uint64_t limit) {
     //    TimeRecorder rc("");
     std::vector<uint64_t> indexes;
     bool cross = false;
-    uint64_t available_begin = table_.front() + 1;
-    for (uint64_t i = 0, pick_count = 0; i < table_.size() && pick_count < limit; ++i) {
+    uint64_t available_begin = collection_.front() + 1;
+    for (uint64_t i = 0, pick_count = 0; i < collection_.size() && pick_count < limit; ++i) {
         uint64_t index = available_begin + i;
-        if (not table_[index]) {
+        if (not collection_[index]) {
             break;
         }
-        if (index % table_.capacity() == table_.rear()) {
+        if (index % collection_.capacity() == collection_.rear()) {
             break;
         }
 
-        if (not cross && table_[index]->IsFinish()) {
-            table_.set_front(index);
-        } else if (table_[index]->state == TaskTableItemState::LOADED) {
+        if (not cross && collection_[index]->IsFinish()) {
+            collection_.set_front(index);
+        } else if (collection_[index]->state == TaskTableItemState::LOADED) {
             cross = true;
             indexes.push_back(index);
             ++pick_count;
@@ -265,7 +266,7 @@ TaskTable::Put(TaskPtr task, TaskTableItemPtr from) {
     item->task = std::move(task);
     item->state = TaskTableItemState::START;
     item->timestamp.start = get_current_timestamp();
-    table_.put(std::move(item));
+    collection_.put(std::move(item));
     if (subscriber_) {
         subscriber_();
     }
@@ -274,10 +275,10 @@ size_t
 TaskTable::TaskToExecute() {
     size_t count = 0;
-    auto begin = table_.front() + 1;
-    for (size_t i = 0; i < table_.size(); ++i) {
+    auto begin = collection_.front() + 1;
+    for (size_t i = 0; i < collection_.size(); ++i) {
         auto index = begin + i;
-        if (table_[index] && table_[index]->state == TaskTableItemState::LOADED) {
+        if (collection_[index] && collection_[index]->state == TaskTableItemState::LOADED) {
             ++count;
         }
     }
diff --git a/core/src/scheduler/TaskTable.h b/core/src/scheduler/TaskTable.h
index 1dfb13efe0..b60fe5a9c8 100644
--- a/core/src/scheduler/TaskTable.h
+++ b/core/src/scheduler/TaskTable.h
@@ -97,7 +97,7 @@ struct TaskTableItem : public interface::dumpable {
 class TaskTable : public interface::dumpable {
 public:
-    TaskTable() : table_(1ULL << 16ULL) {
+    TaskTable() : collection_(1ULL << 16ULL) {
     }
 
     TaskTable(const TaskTable&) = delete;
@@ -127,22 +127,22 @@ class TaskTable : public interface::dumpable {
 public:
     inline const TaskTableItemPtr& operator[](uint64_t index) {
-        return table_[index];
+        return collection_[index];
     }
 
     inline const TaskTableItemPtr&
     at(uint64_t index) {
-        return table_[index];
+        return collection_[index];
collection_[index]; } inline size_t capacity() { - return table_.capacity(); + return collection_.capacity(); } inline size_t size() { - return table_.size(); + return collection_.size(); } public: @@ -156,7 +156,7 @@ class TaskTable : public interface::dumpable { */ inline bool Load(uint64_t index) { - return table_[index]->Load(); + return collection_[index]->Load(); } /* @@ -166,7 +166,7 @@ class TaskTable : public interface::dumpable { */ inline bool Loaded(uint64_t index) { - return table_[index]->Loaded(); + return collection_[index]->Loaded(); } /* @@ -176,7 +176,7 @@ class TaskTable : public interface::dumpable { */ inline bool Execute(uint64_t index) { - return table_[index]->Execute(); + return collection_[index]->Execute(); } /* @@ -186,7 +186,7 @@ class TaskTable : public interface::dumpable { */ inline bool Executed(uint64_t index) { - return table_[index]->Executed(); + return collection_[index]->Executed(); } /* @@ -197,7 +197,7 @@ class TaskTable : public interface::dumpable { inline bool Move(uint64_t index) { - return table_[index]->Move(); + return collection_[index]->Move(); } /* @@ -207,12 +207,12 @@ class TaskTable : public interface::dumpable { */ inline bool Moved(uint64_t index) { - return table_[index]->Moved(); + return collection_[index]->Moved(); } private: std::uint64_t id_ = 0; - CircleQueue table_; + CircleQueue collection_; std::function subscriber_ = nullptr; // cache last finish avoid Pick task from begin always diff --git a/core/src/scheduler/job/BuildIndexJob.cpp b/core/src/scheduler/job/BuildIndexJob.cpp index 4a49881028..c2a88f9ea2 100644 --- a/core/src/scheduler/job/BuildIndexJob.cpp +++ b/core/src/scheduler/job/BuildIndexJob.cpp @@ -25,7 +25,7 @@ BuildIndexJob::BuildIndexJob(engine::meta::MetaPtr meta_ptr, engine::DBOptions o } bool -BuildIndexJob::AddToIndexFiles(const engine::meta::TableFileSchemaPtr& to_index_file) { +BuildIndexJob::AddToIndexFiles(const engine::meta::SegmentSchemaPtr& to_index_file) { std::unique_lock lock(mutex_); if (to_index_file == nullptr || to_index_files_.find(to_index_file->id_) != to_index_files_.end()) { return false; diff --git a/core/src/scheduler/job/BuildIndexJob.h b/core/src/scheduler/job/BuildIndexJob.h index 126877a94f..cd8b802dea 100644 --- a/core/src/scheduler/job/BuildIndexJob.h +++ b/core/src/scheduler/job/BuildIndexJob.h @@ -29,10 +29,10 @@ namespace milvus { namespace scheduler { -using engine::meta::TableFileSchemaPtr; +using engine::meta::SegmentSchemaPtr; -using Id2ToIndexMap = std::unordered_map; -using Id2ToTableFileMap = std::unordered_map; +using Id2ToIndexMap = std::unordered_map; +using Id2ToTableFileMap = std::unordered_map; class BuildIndexJob : public Job, public server::CacheConfigHandler { public: @@ -42,7 +42,7 @@ class BuildIndexJob : public Job, public server::CacheConfigHandler { public: bool - AddToIndexFiles(const TableFileSchemaPtr& to_index_file); + AddToIndexFiles(const SegmentSchemaPtr& to_index_file); void WaitBuildIndexFinish(); diff --git a/core/src/scheduler/job/DeleteJob.cpp b/core/src/scheduler/job/DeleteJob.cpp index 97e9bbf90d..a519af40c1 100644 --- a/core/src/scheduler/job/DeleteJob.cpp +++ b/core/src/scheduler/job/DeleteJob.cpp @@ -16,9 +16,9 @@ namespace milvus { namespace scheduler { -DeleteJob::DeleteJob(std::string table_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource) +DeleteJob::DeleteJob(std::string collection_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource) : Job(JobType::DELETE), - table_id_(std::move(table_id)), + 
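Note: the scheduler hunks above only rename the ring buffer behind TaskTable (table_ -> collection_); the wrap-around arithmetic that PickToExecute() relies on is unchanged. For reference, a minimal sketch of that contract — this is not the project's CircleQueue (which also locks and implements the dumpable interface), just the indexing behavior the pick loops assume:

// Sketch of the wrap-around contract used by the pick loops above.
#include <cstddef>
#include <vector>

template <typename T>
class RingBuffer {
 public:
    explicit RingBuffer(std::size_t capacity) : slots_(capacity) {}

    // Appends at the logical rear; indexes keep growing, storage wraps.
    void put(T item) {
        slots_[rear_ % slots_.size()] = std::move(item);
        ++rear_;
    }

    // Absolute index, wrapped internally — mirrors collection_[index] above.
    const T& operator[](std::size_t index) const {
        return slots_[index % slots_.size()];
    }

    std::size_t capacity() const { return slots_.size(); }
    std::size_t rear() const { return rear_ % slots_.size(); }

 private:
    std::vector<T> slots_;
    std::size_t rear_ = 0;
};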
diff --git a/core/src/scheduler/job/DeleteJob.cpp b/core/src/scheduler/job/DeleteJob.cpp
index 97e9bbf90d..a519af40c1 100644
--- a/core/src/scheduler/job/DeleteJob.cpp
+++ b/core/src/scheduler/job/DeleteJob.cpp
@@ -16,9 +16,9 @@ namespace milvus {
 namespace scheduler {
 
-DeleteJob::DeleteJob(std::string table_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource)
+DeleteJob::DeleteJob(std::string collection_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource)
     : Job(JobType::DELETE),
-      table_id_(std::move(table_id)),
+      collection_id_(std::move(collection_id)),
       meta_ptr_(std::move(meta_ptr)),
       num_resource_(num_resource) {
 }
@@ -27,7 +27,7 @@ void
 DeleteJob::WaitAndDelete() {
     std::unique_lock<std::mutex> lock(mutex_);
     cv_.wait(lock, [&] { return done_resource == num_resource_; });
-    meta_ptr_->DeleteTableFiles(table_id_);
+    meta_ptr_->DeleteTableFiles(collection_id_);
 }
 
 void
@@ -42,7 +42,7 @@ DeleteJob::ResourceDone() {
 json
 DeleteJob::Dump() const {
     json ret{
-        {"table_id", table_id_},
+        {"collection_id", collection_id_},
         {"number_of_resource", num_resource_},
         {"number_of_done", done_resource},
     };
diff --git a/core/src/scheduler/job/DeleteJob.h b/core/src/scheduler/job/DeleteJob.h
index f0bb328437..c1e96a221e 100644
--- a/core/src/scheduler/job/DeleteJob.h
+++ b/core/src/scheduler/job/DeleteJob.h
@@ -29,7 +29,7 @@ namespace scheduler {
 
 class DeleteJob : public Job {
  public:
-    DeleteJob(std::string table_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource);
+    DeleteJob(std::string collection_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource);
 
  public:
     void
@@ -43,8 +43,8 @@ class DeleteJob : public Job {
 
  public:
     std::string
-    table_id() const {
-        return table_id_;
+    collection_id() const {
+        return collection_id_;
     }
 
     engine::meta::MetaPtr
@@ -53,7 +53,7 @@ class DeleteJob : public Job {
     }
 
  private:
-    std::string table_id_;
+    std::string collection_id_;
     engine::meta::MetaPtr meta_ptr_;
     uint64_t num_resource_ = 0;
diff --git a/core/src/scheduler/job/SearchJob.cpp b/core/src/scheduler/job/SearchJob.cpp
index 55655eb8ff..a83bffa1b7 100644
--- a/core/src/scheduler/job/SearchJob.cpp
+++ b/core/src/scheduler/job/SearchJob.cpp
@@ -22,7 +22,7 @@ SearchJob::SearchJob(const std::shared_ptr<server::Context>& context, uint64_t t
 }
 
 bool
-SearchJob::AddIndexFile(const TableFileSchemaPtr& index_file) {
+SearchJob::AddIndexFile(const SegmentSchemaPtr& index_file) {
     std::unique_lock<std::mutex> lock(mutex_);
     if (index_file == nullptr || index_files_.find(index_file->id_) != index_files_.end()) {
         return false;
diff --git a/core/src/scheduler/job/SearchJob.h b/core/src/scheduler/job/SearchJob.h
index 8ae0fe77ba..baf24fe4bb 100644
--- a/core/src/scheduler/job/SearchJob.h
+++ b/core/src/scheduler/job/SearchJob.h
@@ -31,9 +31,9 @@ namespace milvus {
 namespace scheduler {
 
-using engine::meta::TableFileSchemaPtr;
+using engine::meta::SegmentSchemaPtr;
 
-using Id2IndexMap = std::unordered_map<size_t, TableFileSchemaPtr>;
+using Id2IndexMap = std::unordered_map<size_t, SegmentSchemaPtr>;
 
 using ResultIds = engine::ResultIds;
 using ResultDistances = engine::ResultDistances;
@@ -45,7 +45,7 @@ class SearchJob : public Job {
 
  public:
     bool
-    AddIndexFile(const TableFileSchemaPtr& index_file);
+    AddIndexFile(const SegmentSchemaPtr& index_file);
 
     void
     WaitResult();
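Note: DeleteJob's WaitAndDelete()/ResourceDone() pair is a plain condition-variable rendezvous — every resource reports completion, and the waiter proceeds once all have reported. A self-contained sketch of the pattern (names are illustrative, not the real class):

// Minimal rendezvous in the style of DeleteJob, minus the meta cleanup.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class ResourceBarrier {
 public:
    explicit ResourceBarrier(uint64_t num_resource) : num_resource_(num_resource) {}

    // Called once per resource, like DeleteJob::ResourceDone().
    void Done() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++done_resource_;
        cv_.notify_one();
    }

    // Blocks until every resource reported, like DeleteJob::WaitAndDelete().
    void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [&] { return done_resource_ == num_resource_; });
    }

 private:
    std::mutex mutex_;
    std::condition_variable cv_;
    uint64_t num_resource_;
    uint64_t done_resource_ = 0;
};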
diff --git a/core/src/scheduler/task/BuildIndexTask.cpp b/core/src/scheduler/task/BuildIndexTask.cpp
index 2fa02567fe..c9e39e6623 100644
--- a/core/src/scheduler/task/BuildIndexTask.cpp
+++ b/core/src/scheduler/task/BuildIndexTask.cpp
@@ -31,13 +31,13 @@ namespace milvus {
 namespace scheduler {
 
-XBuildIndexTask::XBuildIndexTask(TableFileSchemaPtr file, TaskLabelPtr label)
+XBuildIndexTask::XBuildIndexTask(SegmentSchemaPtr file, TaskLabelPtr label)
     : Task(TaskType::BuildIndexTask, std::move(label)), file_(file) {
     if (file_) {
         EngineType engine_type;
-        if (file->file_type_ == TableFileSchema::FILE_TYPE::RAW ||
-            file->file_type_ == TableFileSchema::FILE_TYPE::TO_INDEX ||
-            file->file_type_ == TableFileSchema::FILE_TYPE::BACKUP) {
+        if (file->file_type_ == SegmentSchema::FILE_TYPE::RAW ||
+            file->file_type_ == SegmentSchema::FILE_TYPE::TO_INDEX ||
+            file->file_type_ == SegmentSchema::FILE_TYPE::BACKUP) {
             engine_type = engine::utils::IsBinaryMetricType(file->metric_type_) ? EngineType::FAISS_BIN_IDMAP
                                                                                 : EngineType::FAISS_IDMAP;
         } else {
@@ -122,18 +122,18 @@ XBuildIndexTask::Execute() {
     EngineType engine_type = (EngineType)file_->engine_type_;
     std::shared_ptr<engine::ExecutionEngine> index;
 
-    // step 2: create table file
-    engine::meta::TableFileSchema table_file;
-    table_file.table_id_ = file_->table_id_;
+    // step 2: create collection file
+    engine::meta::SegmentSchema table_file;
+    table_file.collection_id_ = file_->collection_id_;
     table_file.segment_id_ = file_->file_id_;
     table_file.date_ = file_->date_;
-    table_file.file_type_ = engine::meta::TableFileSchema::NEW_INDEX;
+    table_file.file_type_ = engine::meta::SegmentSchema::NEW_INDEX;
 
     engine::meta::MetaPtr meta_ptr = build_index_job->meta();
     Status status = meta_ptr->CreateTableFile(table_file);
     fiu_do_on("XBuildIndexTask.Execute.create_table_success", status = Status::OK());
     if (!status.ok()) {
-        ENGINE_LOG_ERROR << "Failed to create table file: " << status.ToString();
+        ENGINE_LOG_ERROR << "Failed to create collection file: " << status.ToString();
         build_index_job->BuildIndexDone(to_index_id_);
         build_index_job->GetStatus() = status;
         to_index_engine_ = nullptr;
@@ -152,7 +152,7 @@ XBuildIndexTask::Execute() {
             std::string msg = "Build index exception: " + std::string(ex.what());
             ENGINE_LOG_ERROR << msg;
 
-            table_file.file_type_ = engine::meta::TableFileSchema::TO_DELETE;
+            table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
             status = meta_ptr->UpdateTableFile(table_file);
             ENGINE_LOG_DEBUG << "Build index fail, mark file: " << table_file.file_id_ << " to to_delete";
 
@@ -162,16 +162,16 @@ XBuildIndexTask::Execute() {
             return;
         }
 
-        // step 4: if table has been deleted, dont save index file
+        // step 4: if collection has been deleted, dont save index file
         bool has_table = false;
-        meta_ptr->HasTable(file_->table_id_, has_table);
+        meta_ptr->HasTable(file_->collection_id_, has_table);
         fiu_do_on("XBuildIndexTask.Execute.has_table", has_table = true);
 
         if (!has_table) {
-            meta_ptr->DeleteTableFiles(file_->table_id_);
+            meta_ptr->DeleteTableFiles(file_->collection_id_);
 
             build_index_job->BuildIndexDone(to_index_id_);
-            build_index_job->GetStatus() = Status(DB_ERROR, "Table has been deleted, discard index file.");
+            build_index_job->GetStatus() = Status(DB_ERROR, "Collection has been deleted, discard index file.");
             to_index_engine_ = nullptr;
             return;
         }
@@ -193,7 +193,7 @@ XBuildIndexTask::Execute() {
             if (!status.ok()) {
                 // if failed to serialize index file to disk
                 // typical error: out of disk space, out of memory or permition denied
-                table_file.file_type_ = engine::meta::TableFileSchema::TO_DELETE;
+                table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
                 status = meta_ptr->UpdateTableFile(table_file);
                 ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_
                                  << " to to_delete";
@@ -207,14 +207,14 @@ XBuildIndexTask::Execute() {
         }
 
         // step 6: update meta
-        table_file.file_type_ = engine::meta::TableFileSchema::INDEX;
+        table_file.file_type_ = engine::meta::SegmentSchema::INDEX;
         table_file.file_size_ = server::CommonUtil::GetFileSize(table_file.location_);
         table_file.row_count_ = file_->row_count_;  // index->Count();
 
         auto origin_file = *file_;
-        origin_file.file_type_ = engine::meta::TableFileSchema::BACKUP;
+        origin_file.file_type_ = engine::meta::SegmentSchema::BACKUP;
 
-        engine::meta::TableFilesSchema update_files = {table_file, origin_file};
+        engine::meta::SegmentsSchema update_files = {table_file, origin_file};
 
         if (status.ok()) {  // makesure index file is sucessfully serialized to disk
             status = meta_ptr->UpdateTableFiles(update_files);
@@ -230,11 +230,11 @@ XBuildIndexTask::Execute() {
             }
         } else {
             // failed to update meta, mark the new file as to_delete, don't delete old file
-            origin_file.file_type_ = engine::meta::TableFileSchema::TO_INDEX;
+            origin_file.file_type_ = engine::meta::SegmentSchema::TO_INDEX;
             status = meta_ptr->UpdateTableFile(origin_file);
             ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << origin_file.file_id_
                              << " to to_index";
 
-            table_file.file_type_ = engine::meta::TableFileSchema::TO_DELETE;
+            table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
             status = meta_ptr->UpdateTableFile(table_file);
             ENGINE_LOG_DEBUG << "Failed to up date file to index, mark file: " << table_file.file_id_
                              << " to to_delete";
diff --git a/core/src/scheduler/task/BuildIndexTask.h b/core/src/scheduler/task/BuildIndexTask.h
index a2211141a6..1f9d704949 100644
--- a/core/src/scheduler/task/BuildIndexTask.h
+++ b/core/src/scheduler/task/BuildIndexTask.h
@@ -20,7 +20,7 @@ namespace scheduler {
 
 class XBuildIndexTask : public Task {
  public:
-    explicit XBuildIndexTask(TableFileSchemaPtr file, TaskLabelPtr label);
+    explicit XBuildIndexTask(SegmentSchemaPtr file, TaskLabelPtr label);
 
     void
     Load(LoadType type, uint8_t device_id) override;
@@ -29,8 +29,8 @@ class XBuildIndexTask : public Task {
     Execute() override;
 
  public:
-    TableFileSchemaPtr file_;
-    TableFileSchema table_file_;
+    SegmentSchemaPtr file_;
+    SegmentSchema table_file_;
     size_t to_index_id_ = 0;
     int to_index_type_ = 0;
     ExecutionEnginePtr to_index_engine_ = nullptr;
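Note: the Execute() hunks above walk a segment file through the same state machine as before, only under the SegmentSchema name. A compressed sketch of the transitions visible in this diff — the enum values come from the patch, the helper itself is illustrative:

// States named in the hunks above; the helper condenses the failure paths.
enum FileType { RAW, TO_INDEX, BACKUP, NEW_INDEX, INDEX, TO_DELETE };

FileType NextStateOnBuildResult(bool build_ok, bool collection_exists) {
    if (!collection_exists) {
        return TO_DELETE;  // collection dropped mid-build: discard the index file
    }
    return build_ok ? INDEX : TO_DELETE;  // success promotes NEW_INDEX to INDEX
}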
diff --git a/core/src/scheduler/task/SearchTask.cpp b/core/src/scheduler/task/SearchTask.cpp
index 9394cc55cd..3609715535 100644
--- a/core/src/scheduler/task/SearchTask.cpp
+++ b/core/src/scheduler/task/SearchTask.cpp
@@ -83,8 +83,8 @@ void
 CollectFileMetrics(int file_type, size_t file_size) {
     server::MetricsBase& inst = server::Metrics::GetInstance();
     switch (file_type) {
-        case TableFileSchema::RAW:
-        case TableFileSchema::TO_INDEX: {
+        case SegmentSchema::RAW:
+        case SegmentSchema::TO_INDEX: {
             inst.RawFileSizeHistogramObserve(file_size);
             inst.RawFileSizeTotalIncrement(file_size);
             inst.RawFileSizeGaugeSet(file_size);
@@ -99,7 +99,7 @@ CollectFileMetrics(int file_type, size_t file_size) {
     }
 }
 
-XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr file, TaskLabelPtr label)
+XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr file, TaskLabelPtr label)
     : Task(TaskType::SearchTask, std::move(label)), context_(context), file_(file) {
     if (file_) {
         // distance -- value 0 means two vectors equal, ascending reduce, L2/HAMMING/JACCARD/TONIMOTO ...
@@ -110,9 +110,9 @@ XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, TableF
         }
 
         EngineType engine_type;
-        if (file->file_type_ == TableFileSchema::FILE_TYPE::RAW ||
-            file->file_type_ == TableFileSchema::FILE_TYPE::TO_INDEX ||
-            file->file_type_ == TableFileSchema::FILE_TYPE::BACKUP) {
+        if (file->file_type_ == SegmentSchema::FILE_TYPE::RAW ||
+            file->file_type_ == SegmentSchema::FILE_TYPE::TO_INDEX ||
+            file->file_type_ == SegmentSchema::FILE_TYPE::BACKUP) {
             engine_type = engine::utils::IsBinaryMetricType(file->metric_type_) ? EngineType::FAISS_BIN_IDMAP
                                                                                 : EngineType::FAISS_IDMAP;
         } else {
diff --git a/core/src/scheduler/task/SearchTask.h b/core/src/scheduler/task/SearchTask.h
index cd4a9316b2..267ab926d8 100644
--- a/core/src/scheduler/task/SearchTask.h
+++ b/core/src/scheduler/task/SearchTask.h
@@ -25,7 +25,7 @@ namespace scheduler {
 // TODO(wxyu): rewrite
 class XSearchTask : public Task {
  public:
-    explicit XSearchTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr file, TaskLabelPtr label);
+    explicit XSearchTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr file, TaskLabelPtr label);
 
     void
     Load(LoadType type, uint8_t device_id) override;
@@ -53,7 +53,7 @@ class XSearchTask : public Task {
  public:
     const std::shared_ptr<server::Context> context_;
 
-    TableFileSchemaPtr file_;
+    SegmentSchemaPtr file_;
 
     size_t index_id_ = 0;
     int index_type_ = 0;
diff --git a/core/src/scheduler/task/TestTask.cpp b/core/src/scheduler/task/TestTask.cpp
index 57a4f29d82..7a382c6c2e 100644
--- a/core/src/scheduler/task/TestTask.cpp
+++ b/core/src/scheduler/task/TestTask.cpp
@@ -18,7 +18,7 @@ namespace milvus {
 namespace scheduler {
 
-TestTask::TestTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr& file, TaskLabelPtr label)
+TestTask::TestTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr& file, TaskLabelPtr label)
     : XSearchTask(context, file, std::move(label)) {
 }
diff --git a/core/src/scheduler/task/TestTask.h b/core/src/scheduler/task/TestTask.h
index d3f584f1c9..b70fea645c 100644
--- a/core/src/scheduler/task/TestTask.h
+++ b/core/src/scheduler/task/TestTask.h
@@ -20,7 +20,7 @@ namespace scheduler {
 
 class TestTask : public XSearchTask {
  public:
-    explicit TestTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr& file, TaskLabelPtr label);
+    explicit TestTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr& file, TaskLabelPtr label);
 
  public:
     void
diff --git a/core/src/server/DBWrapper.cpp b/core/src/server/DBWrapper.cpp
index 2aea0cc529..d9d0234c42 100644
--- a/core/src/server/DBWrapper.cpp
+++ b/core/src/server/DBWrapper.cpp
@@ -204,7 +204,7 @@ DBWrapper::StartService() {
 
     db_->Start();
 
-    // preload table
+    // preload collection
     std::string preload_tables;
     s = config.GetDBConfigPreloadTable(preload_tables);
     if (!s.ok()) {
@@ -237,19 +237,19 @@ DBWrapper::PreloadTables(const std::string& preload_tables) {
         // do nothing
     } else if (preload_tables == "*") {
         // load all tables
-        std::vector<engine::meta::TableSchema> table_schema_array;
+        std::vector<engine::meta::CollectionSchema> table_schema_array;
         db_->AllTables(table_schema_array);
 
         for (auto& schema : table_schema_array) {
-            auto status = db_->PreloadTable(schema.table_id_);
+            auto status = db_->PreloadTable(schema.collection_id_);
             if (!status.ok()) {
                 return status;
             }
         }
     } else {
-        std::vector<std::string> table_names;
-        StringHelpFunctions::SplitStringByDelimeter(preload_tables, ",", table_names);
-        for (auto& name : table_names) {
+        std::vector<std::string> collection_names;
+        StringHelpFunctions::SplitStringByDelimeter(preload_tables, ",", collection_names);
+        for (auto& name : collection_names) {
             auto status = db_->PreloadTable(name);
             if (!status.ok()) {
                 return status;
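Note: DBWrapper::PreloadTables() keeps its two input forms: "*" preloads every collection, otherwise the config value is split on commas via StringHelpFunctions::SplitStringByDelimeter. A standalone sketch of that parsing (assuming empty names are skipped; the real helper may keep them):

// Parses a db_config.preload_table value into collection names.
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> ParsePreloadList(const std::string& value) {
    std::vector<std::string> names;
    if (value.empty() || value == "*") {
        return names;  // caller handles "preload nothing" and "preload all"
    }
    std::stringstream ss(value);
    std::string name;
    while (std::getline(ss, name, ',')) {
        if (!name.empty()) {
            names.push_back(name);
        }
    }
    return names;
}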
diff --git a/core/src/server/delivery/RequestHandler.cpp b/core/src/server/delivery/RequestHandler.cpp
index b416e721d0..b95e800bf1 100644
--- a/core/src/server/delivery/RequestHandler.cpp
+++ b/core/src/server/delivery/RequestHandler.cpp
@@ -43,62 +43,62 @@ namespace milvus {
 namespace server {
 
 Status
-RequestHandler::CreateTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t dimension,
-                            int64_t index_file_size, int64_t metric_type) {
+RequestHandler::CreateTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                            int64_t dimension, int64_t index_file_size, int64_t metric_type) {
     BaseRequestPtr request_ptr =
-        CreateTableRequest::Create(context, table_name, dimension, index_file_size, metric_type);
+        CreateTableRequest::Create(context, collection_name, dimension, index_file_size, metric_type);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::HasTable(const std::shared_ptr<Context>& context, const std::string& table_name, bool& has_table) {
-    BaseRequestPtr request_ptr = HasTableRequest::Create(context, table_name, has_table);
+RequestHandler::HasTable(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_table) {
+    BaseRequestPtr request_ptr = HasTableRequest::Create(context, collection_name, has_table);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::DropTable(const std::shared_ptr<Context>& context, const std::string& table_name) {
-    BaseRequestPtr request_ptr = DropTableRequest::Create(context, table_name);
+RequestHandler::DropTable(const std::shared_ptr<Context>& context, const std::string& collection_name) {
+    BaseRequestPtr request_ptr = DropTableRequest::Create(context, collection_name);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::CreateIndex(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t index_type,
-                            const milvus::json& json_params) {
-    BaseRequestPtr request_ptr = CreateIndexRequest::Create(context, table_name, index_type, json_params);
+RequestHandler::CreateIndex(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                            int64_t index_type, const milvus::json& json_params) {
+    BaseRequestPtr request_ptr = CreateIndexRequest::Create(context, collection_name, index_type, json_params);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::Insert(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::Insert(const std::shared_ptr<Context>& context, const std::string& collection_name,
                        engine::VectorsData& vectors, const std::string& partition_tag) {
-    BaseRequestPtr request_ptr = InsertRequest::Create(context, table_name, vectors, partition_tag);
+    BaseRequestPtr request_ptr = InsertRequest::Create(context, collection_name, vectors, partition_tag);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::GetVectorByID(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::GetVectorByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
                               const std::vector<int64_t>& ids, engine::VectorsData& vectors) {
-    BaseRequestPtr request_ptr = GetVectorByIDRequest::Create(context, table_name, ids, vectors);
+    BaseRequestPtr request_ptr = GetVectorByIDRequest::Create(context, collection_name, ids, vectors);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& collection_name,
                              const std::string& segment_name, std::vector<int64_t>& vector_ids) {
-    BaseRequestPtr request_ptr = GetVectorIDsRequest::Create(context, table_name, segment_name, vector_ids);
+    BaseRequestPtr request_ptr = GetVectorIDsRequest::Create(context, collection_name, segment_name, vector_ids);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
@@ -113,49 +113,50 @@ RequestHandler::ShowTables(const std::shared_ptr<Context>& context, std::vector<
 }
 
 Status
-RequestHandler::ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& collection_name,
                               TableInfo& table_info) {
-    BaseRequestPtr request_ptr = ShowTableInfoRequest::Create(context, table_name, table_info);
+    BaseRequestPtr request_ptr = ShowTableInfoRequest::Create(context, collection_name, table_info);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::Search(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::Search(const std::shared_ptr<Context>& context, const std::string& collection_name,
                        const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
                        const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
                        TopKQueryResult& result) {
-    BaseRequestPtr request_ptr =
-        SearchRequest::Create(context, table_name, vectors, topk, extra_params, partition_list, file_id_list, result);
+    BaseRequestPtr request_ptr = SearchRequest::Create(context, collection_name, vectors, topk, extra_params,
+                                                       partition_list, file_id_list, result);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::SearchByID(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t vector_id,
-                           int64_t topk, const milvus::json& extra_params,
+RequestHandler::SearchByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                           int64_t vector_id, int64_t topk, const milvus::json& extra_params,
                            const std::vector<std::string>& partition_list, TopKQueryResult& result) {
     BaseRequestPtr request_ptr =
-        SearchByIDRequest::Create(context, table_name, vector_id, topk, extra_params, partition_list, result);
+        SearchByIDRequest::Create(context, collection_name, vector_id, topk, extra_params, partition_list, result);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::DescribeTable(const std::shared_ptr<Context>& context, const std::string& table_name,
-                              TableSchema& table_schema) {
-    BaseRequestPtr request_ptr = DescribeTableRequest::Create(context, table_name, table_schema);
+RequestHandler::DescribeTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                              CollectionSchema& table_schema) {
+    BaseRequestPtr request_ptr = DescribeTableRequest::Create(context, collection_name, table_schema);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::CountTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t& count) {
-    BaseRequestPtr request_ptr = CountTableRequest::Create(context, table_name, count);
+RequestHandler::CountTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                           int64_t& count) {
+    BaseRequestPtr request_ptr = CountTableRequest::Create(context, collection_name, count);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
@@ -170,77 +171,77 @@ RequestHandler::Cmd(const std::shared_ptr<Context>& context, const std::string&
 }
 
 Status
-RequestHandler::DeleteByID(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::DeleteByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
                            const std::vector<int64_t>& vector_ids) {
-    BaseRequestPtr request_ptr = DeleteByIDRequest::Create(context, table_name, vector_ids);
+    BaseRequestPtr request_ptr = DeleteByIDRequest::Create(context, collection_name, vector_ids);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::PreloadTable(const std::shared_ptr<Context>& context, const std::string& table_name) {
-    BaseRequestPtr request_ptr = PreloadTableRequest::Create(context, table_name);
+RequestHandler::PreloadTable(const std::shared_ptr<Context>& context, const std::string& collection_name) {
+    BaseRequestPtr request_ptr = PreloadTableRequest::Create(context, collection_name);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::DescribeIndex(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::DescribeIndex(const std::shared_ptr<Context>& context, const std::string& collection_name,
                               IndexParam& param) {
-    BaseRequestPtr request_ptr = DescribeIndexRequest::Create(context, table_name, param);
+    BaseRequestPtr request_ptr = DescribeIndexRequest::Create(context, collection_name, param);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::DropIndex(const std::shared_ptr<Context>& context, const std::string& table_name) {
-    BaseRequestPtr request_ptr = DropIndexRequest::Create(context, table_name);
+RequestHandler::DropIndex(const std::shared_ptr<Context>& context, const std::string& collection_name) {
+    BaseRequestPtr request_ptr = DropIndexRequest::Create(context, collection_name);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::CreatePartition(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::CreatePartition(const std::shared_ptr<Context>& context, const std::string& collection_name,
                                 const std::string& tag) {
-    BaseRequestPtr request_ptr = CreatePartitionRequest::Create(context, table_name, tag);
+    BaseRequestPtr request_ptr = CreatePartitionRequest::Create(context, collection_name, tag);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::ShowPartitions(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::ShowPartitions(const std::shared_ptr<Context>& context, const std::string& collection_name,
                                std::vector<PartitionParam>& partitions) {
-    BaseRequestPtr request_ptr = ShowPartitionsRequest::Create(context, table_name, partitions);
+    BaseRequestPtr request_ptr = ShowPartitionsRequest::Create(context, collection_name, partitions);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::DropPartition(const std::shared_ptr<Context>& context, const std::string& table_name,
+RequestHandler::DropPartition(const std::shared_ptr<Context>& context, const std::string& collection_name,
                               const std::string& tag) {
-    BaseRequestPtr request_ptr = DropPartitionRequest::Create(context, table_name, tag);
+    BaseRequestPtr request_ptr = DropPartitionRequest::Create(context, collection_name, tag);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& table_names) {
-    BaseRequestPtr request_ptr = FlushRequest::Create(context, table_names);
+RequestHandler::Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& collection_names) {
+    BaseRequestPtr request_ptr = FlushRequest::Create(context, collection_names);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
 }
 
 Status
-RequestHandler::Compact(const std::shared_ptr<Context>& context, const std::string& table_name) {
-    BaseRequestPtr request_ptr = CompactRequest::Create(context, table_name);
+RequestHandler::Compact(const std::shared_ptr<Context>& context, const std::string& collection_name) {
+    BaseRequestPtr request_ptr = CompactRequest::Create(context, collection_name);
     RequestScheduler::ExecRequest(request_ptr);
 
     return request_ptr->status();
diff --git a/core/src/server/delivery/RequestHandler.h b/core/src/server/delivery/RequestHandler.h
index 8b9bb68b5a..b9d286a771 100644
--- a/core/src/server/delivery/RequestHandler.h
+++ b/core/src/server/delivery/RequestHandler.h
@@ -27,84 +27,87 @@ class RequestHandler {
     RequestHandler() = default;
 
     Status
-    CreateTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t dimension,
+    CreateTable(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t dimension,
                 int64_t index_file_size, int64_t metric_type);
 
     Status
-    HasTable(const std::shared_ptr<Context>& context, const std::string& table_name, bool& has_table);
+    HasTable(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_table);
 
     Status
-    DropTable(const std::shared_ptr<Context>& context, const std::string& table_name);
+    DropTable(const std::shared_ptr<Context>& context, const std::string& collection_name);
 
     Status
-    CreateIndex(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t index_type,
+    CreateIndex(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t index_type,
                 const milvus::json& json_params);
 
     Status
-    Insert(const std::shared_ptr<Context>& context, const std::string& table_name, engine::VectorsData& vectors,
+    Insert(const std::shared_ptr<Context>& context, const std::string& collection_name, engine::VectorsData& vectors,
            const std::string& partition_tag);
 
     Status
-    GetVectorByID(const std::shared_ptr<Context>& context, const std::string& table_name,
+    GetVectorByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
                   const std::vector<int64_t>& ids, engine::VectorsData& vectors);
 
     Status
-    GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& table_name,
+    GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& collection_name,
                  const std::string& segment_name, std::vector<int64_t>& vector_ids);
 
     Status
     ShowTables(const std::shared_ptr<Context>& context, std::vector<std::string>& tables);
 
     Status
-    ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& table_name, TableInfo& table_info);
+    ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& collection_name, TableInfo& table_info);
 
     Status
-    Search(const std::shared_ptr<Context>& context, const std::string& table_name, const engine::VectorsData& vectors,
-           int64_t topk, const milvus::json& extra_params, const std::vector<std::string>& partition_list,
-           const std::vector<std::string>& file_id_list, TopKQueryResult& result);
+    Search(const std::shared_ptr<Context>& context, const std::string& collection_name,
+           const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
+           const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
+           TopKQueryResult& result);
 
     Status
-    SearchByID(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t vector_id, int64_t topk,
-               const milvus::json& extra_params, const std::vector<std::string>& partition_list,
+    SearchByID(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t vector_id,
+               int64_t topk, const milvus::json& extra_params, const std::vector<std::string>& partition_list,
                TopKQueryResult& result);
 
     Status
-    DescribeTable(const std::shared_ptr<Context>& context, const std::string& table_name, TableSchema& table_schema);
+    DescribeTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                  CollectionSchema& table_schema);
 
     Status
-    CountTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t& count);
+    CountTable(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t& count);
 
     Status
     Cmd(const std::shared_ptr<Context>& context, const std::string& cmd, std::string& reply);
 
     Status
-    DeleteByID(const std::shared_ptr<Context>& context, const std::string& table_name,
+    DeleteByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
                const std::vector<int64_t>& vector_ids);
 
     Status
-    PreloadTable(const std::shared_ptr<Context>& context, const std::string& table_name);
+    PreloadTable(const std::shared_ptr<Context>& context, const std::string& collection_name);
 
     Status
-    DescribeIndex(const std::shared_ptr<Context>& context, const std::string& table_name, IndexParam& param);
+    DescribeIndex(const std::shared_ptr<Context>& context, const std::string& collection_name, IndexParam& param);
 
     Status
-    DropIndex(const std::shared_ptr<Context>& context, const std::string& table_name);
+    DropIndex(const std::shared_ptr<Context>& context, const std::string& collection_name);
 
     Status
-    CreatePartition(const std::shared_ptr<Context>& context, const std::string& table_name, const std::string& tag);
+    CreatePartition(const std::shared_ptr<Context>& context, const std::string& collection_name,
+                    const std::string& tag);
 
     Status
-    ShowPartitions(const std::shared_ptr<Context>& context, const std::string& table_name,
+    ShowPartitions(const std::shared_ptr<Context>& context, const std::string& collection_name,
                    std::vector<PartitionParam>& partitions);
 
     Status
-    DropPartition(const std::shared_ptr<Context>& context, const std::string& table_name, const std::string& tag);
+    DropPartition(const std::shared_ptr<Context>& context, const std::string& collection_name, const std::string& tag);
 
     Status
-    Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& table_names);
+    Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& collection_names);
 
     Status
-    Compact(const std::shared_ptr<Context>& context, const std::string& table_name);
+    Compact(const std::shared_ptr<Context>& context, const std::string& collection_name);
 };
 
 } // namespace server
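Note: every handler in RequestHandler.cpp follows one template — build a request object through its static Create(), hand it to RequestScheduler::ExecRequest(), return the request's status. A condensed sketch of that shape; all types here are stand-ins, not the real milvus classes:

// Minimal model of the handler pattern repeated above.
#include <memory>
#include <utility>

struct Status { bool ok = true; };

struct BaseRequest {
    virtual ~BaseRequest() = default;
    virtual Status OnExecute() = 0;  // concrete requests implement the work
    Status status_;
};

template <typename RequestT, typename... Args>
Status HandleRequest(Args&&... args) {
    std::shared_ptr<BaseRequest> request = RequestT::Create(std::forward<Args>(args)...);
    request->status_ = request->OnExecute();  // RequestScheduler::ExecRequest() in the real code
    return request->status_;
}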
" + "You also can check whether the collection name exists."; } Status diff --git a/core/src/server/delivery/request/BaseRequest.h b/core/src/server/delivery/request/BaseRequest.h index d00a20a6c3..4695ddfac0 100644 --- a/core/src/server/delivery/request/BaseRequest.h +++ b/core/src/server/delivery/request/BaseRequest.h @@ -31,20 +31,21 @@ namespace milvus { namespace server { -struct TableSchema { - std::string table_name_; +struct CollectionSchema { + std::string collection_name_; int64_t dimension_; int64_t index_file_size_; int64_t metric_type_; - TableSchema() { + CollectionSchema() { dimension_ = 0; index_file_size_ = 0; metric_type_ = 0; } - TableSchema(const std::string& table_name, int64_t dimension, int64_t index_file_size, int64_t metric_type) { - table_name_ = table_name; + CollectionSchema(const std::string& collection_name, int64_t dimension, int64_t index_file_size, + int64_t metric_type) { + collection_name_ = collection_name; dimension_ = dimension; index_file_size_ = index_file_size; metric_type_ = metric_type; @@ -68,7 +69,7 @@ struct TopKQueryResult { }; struct IndexParam { - std::string table_name_; + std::string collection_name_; int64_t index_type_; std::string extra_params_; @@ -76,20 +77,20 @@ struct IndexParam { index_type_ = 0; } - IndexParam(const std::string& table_name, int64_t index_type) { - table_name_ = table_name; + IndexParam(const std::string& collection_name, int64_t index_type) { + collection_name_ = collection_name; index_type_ = index_type; } }; struct PartitionParam { - std::string table_name_; + std::string collection_name_; std::string tag_; PartitionParam() = default; - PartitionParam(const std::string& table_name, const std::string& tag) { - table_name_ = table_name; + PartitionParam(const std::string& collection_name, const std::string& tag) { + collection_name_ = collection_name; tag_ = tag; } }; @@ -126,7 +127,7 @@ class BaseRequest { kGetVectorByID, kGetVectorIDs, - // table operations + // collection operations kShowTables = 300, kCreateTable, kHasTable, @@ -208,7 +209,7 @@ class BaseRequest { OnPostExecute(); std::string - TableNotExistMsg(const std::string& table_name); + TableNotExistMsg(const std::string& collection_name); protected: const std::shared_ptr context_; diff --git a/core/src/server/delivery/request/CompactRequest.cpp b/core/src/server/delivery/request/CompactRequest.cpp index 60e004ea46..30ffee4cbe 100644 --- a/core/src/server/delivery/request/CompactRequest.cpp +++ b/core/src/server/delivery/request/CompactRequest.cpp @@ -26,47 +26,48 @@ namespace milvus { namespace server { -CompactRequest::CompactRequest(const std::shared_ptr& context, const std::string& table_name) - : BaseRequest(context, BaseRequest::kCompact), table_name_(table_name) { +CompactRequest::CompactRequest(const std::shared_ptr& context, + const std::string& collection_name) + : BaseRequest(context, BaseRequest::kCompact), collection_name_(collection_name) { } BaseRequestPtr -CompactRequest::Create(const std::shared_ptr& context, const std::string& table_name) { - return std::shared_ptr(new CompactRequest(context, table_name)); +CompactRequest::Create(const std::shared_ptr& context, const std::string& collection_name) { + return std::shared_ptr(new CompactRequest(context, collection_name)); } Status CompactRequest::OnExecute() { try { - std::string hdr = "CompactRequest(table=" + table_name_ + ")"; + std::string hdr = "CompactRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = 
diff --git a/core/src/server/delivery/request/CompactRequest.cpp b/core/src/server/delivery/request/CompactRequest.cpp
index 60e004ea46..30ffee4cbe 100644
--- a/core/src/server/delivery/request/CompactRequest.cpp
+++ b/core/src/server/delivery/request/CompactRequest.cpp
@@ -26,47 +26,48 @@ namespace milvus {
 namespace server {
 
-CompactRequest::CompactRequest(const std::shared_ptr<Context>& context, const std::string& table_name)
-    : BaseRequest(context, BaseRequest::kCompact), table_name_(table_name) {
+CompactRequest::CompactRequest(const std::shared_ptr<Context>& context,
+                               const std::string& collection_name)
+    : BaseRequest(context, BaseRequest::kCompact), collection_name_(collection_name) {
 }
 
 BaseRequestPtr
-CompactRequest::Create(const std::shared_ptr<Context>& context, const std::string& table_name) {
-    return std::shared_ptr<BaseRequest>(new CompactRequest(context, table_name));
+CompactRequest::Create(const std::shared_ptr<Context>& context, const std::string& collection_name) {
+    return std::shared_ptr<BaseRequest>(new CompactRequest(context, collection_name));
 }
 
 Status
 CompactRequest::OnExecute() {
     try {
-        std::string hdr = "CompactRequest(table=" + table_name_ + ")";
+        std::string hdr = "CompactRequest(collection=" + collection_name_ + ")";
         TimeRecorderAuto rc(hdr);
 
         // step 1: check arguments
-        auto status = ValidationUtil::ValidateTableName(table_name_);
+        auto status = ValidationUtil::ValidateCollectionName(collection_name_);
         if (!status.ok()) {
             return status;
         }
 
-        // only process root table, ignore partition table
-        engine::meta::TableSchema table_schema;
-        table_schema.table_id_ = table_name_;
+        // only process root collection, ignore partition collection
+        engine::meta::CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_name_;
         status = DBWrapper::DB()->DescribeTable(table_schema);
         if (!status.ok()) {
             if (status.code() == DB_NOT_FOUND) {
-                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
+                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
             } else {
                 return status;
             }
         } else {
             if (!table_schema.owner_table_.empty()) {
-                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
+                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
             }
         }
 
         rc.RecordSection("check validation");
 
-        // step 2: check table existence
-        status = DBWrapper::DB()->Compact(table_name_);
+        // step 2: check collection existence
+        status = DBWrapper::DB()->Compact(collection_name_);
         if (!status.ok()) {
             return status;
         }
diff --git a/core/src/server/delivery/request/CompactRequest.h b/core/src/server/delivery/request/CompactRequest.h
index a19e4955b0..c8f3fd3341 100644
--- a/core/src/server/delivery/request/CompactRequest.h
+++ b/core/src/server/delivery/request/CompactRequest.h
@@ -28,16 +28,16 @@ namespace server {
 class CompactRequest : public BaseRequest {
  public:
     static BaseRequestPtr
-    Create(const std::shared_ptr<Context>& context, const std::string& table_name);
+    Create(const std::shared_ptr<Context>& context, const std::string& collection_name);
 
 protected:
-    CompactRequest(const std::shared_ptr<Context>& context, const std::string& table_name);
+    CompactRequest(const std::shared_ptr<Context>& context, const std::string& collection_name);
 
     Status
     OnExecute() override;
 
 private:
-    const std::string table_name_;
+    const std::string collection_name_;
 };
 
 } // namespace server
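Note: the "only process root collection, ignore partition collection" guard recurs in nearly every request above — a partition's schema entry carries its owner's id in owner_table_, and an empty owner marks a root collection. As a standalone predicate (the struct is a minimal stand-in for engine::meta::CollectionSchema):

#include <string>

// Minimal stand-in for the meta schema fields the guard inspects.
struct CollectionSchema {
    std::string collection_id_;
    std::string owner_table_;  // non-empty means this entry is a partition
};

bool IsRootCollection(const CollectionSchema& schema) {
    return schema.owner_table_.empty();
}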
diff --git a/core/src/server/delivery/request/CountTableRequest.cpp b/core/src/server/delivery/request/CountTableRequest.cpp
index efb1d28a5d..41296fbc16 100644
--- a/core/src/server/delivery/request/CountTableRequest.cpp
+++ b/core/src/server/delivery/request/CountTableRequest.cpp
@@ -23,41 +23,41 @@ namespace milvus {
 namespace server {
 
 CountTableRequest::CountTableRequest(const std::shared_ptr<Context>& context,
-                                     const std::string& table_name, int64_t& row_count)
-    : BaseRequest(context, BaseRequest::kCountTable), table_name_(table_name), row_count_(row_count) {
+                                     const std::string& collection_name, int64_t& row_count)
+    : BaseRequest(context, BaseRequest::kCountTable), collection_name_(collection_name), row_count_(row_count) {
 }
 
 BaseRequestPtr
-CountTableRequest::Create(const std::shared_ptr<Context>& context, const std::string& table_name,
+CountTableRequest::Create(const std::shared_ptr<Context>& context, const std::string& collection_name,
                           int64_t& row_count) {
-    return std::shared_ptr<BaseRequest>(new CountTableRequest(context, table_name, row_count));
+    return std::shared_ptr<BaseRequest>(new CountTableRequest(context, collection_name, row_count));
 }
 
 Status
 CountTableRequest::OnExecute() {
     try {
-        std::string hdr = "CountTableRequest(table=" + table_name_ + ")";
+        std::string hdr = "CountTableRequest(collection=" + collection_name_ + ")";
         TimeRecorderAuto rc(hdr);
 
         // step 1: check arguments
-        auto status = ValidationUtil::ValidateTableName(table_name_);
+        auto status = ValidationUtil::ValidateCollectionName(collection_name_);
         if (!status.ok()) {
             return status;
         }
 
-        // only process root table, ignore partition table
-        engine::meta::TableSchema table_schema;
-        table_schema.table_id_ = table_name_;
+        // only process root collection, ignore partition collection
+        engine::meta::CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_name_;
         status = DBWrapper::DB()->DescribeTable(table_schema);
         if (!status.ok()) {
             if (status.code() == DB_NOT_FOUND) {
-                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
+                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
             } else {
                 return status;
             }
         } else {
             if (!table_schema.owner_table_.empty()) {
-                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
+                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
             }
         }
 
@@ -65,13 +65,13 @@ CountTableRequest::OnExecute() {
 
         // step 2: get row count
         uint64_t row_count = 0;
-        status = DBWrapper::DB()->GetTableRowCount(table_name_, row_count);
+        status = DBWrapper::DB()->GetTableRowCount(collection_name_, row_count);
         fiu_do_on("CountTableRequest.OnExecute.db_not_found", status = Status(DB_NOT_FOUND, ""));
         fiu_do_on("CountTableRequest.OnExecute.status_error", status = Status(SERVER_UNEXPECTED_ERROR, ""));
         fiu_do_on("CountTableRequest.OnExecute.throw_std_exception", throw std::exception());
         if (!status.ok()) {
             if (status.code() == DB_NOT_FOUND) {
-                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
+                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
             } else {
                 return status;
             }
diff --git a/core/src/server/delivery/request/CountTableRequest.h b/core/src/server/delivery/request/CountTableRequest.h
index d8f5d6d7cb..ea6651deea 100644
--- a/core/src/server/delivery/request/CountTableRequest.h
+++ b/core/src/server/delivery/request/CountTableRequest.h
@@ -22,17 +22,18 @@ namespace server {
 class CountTableRequest : public BaseRequest {
  public:
     static BaseRequestPtr
-    Create(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t& row_count);
+    Create(const std::shared_ptr<Context>& context, const std::string& collection_name,
+           int64_t& row_count);
 
 protected:
-    CountTableRequest(const std::shared_ptr<Context>& context, const std::string& table_name,
+    CountTableRequest(const std::shared_ptr<Context>& context, const std::string& collection_name,
                       int64_t& row_count);
 
     Status
     OnExecute() override;
 
 private:
-    const std::string table_name_;
+    const std::string collection_name_;
     int64_t& row_count_;
 };
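Note: the fiu_do_on() calls sprinkled through these requests come from libfiu — in a fault-injection build, the named point can be armed from a test so that OnExecute() sees an injected status or exception. A hypothetical test-side trigger, assuming libfiu's standard control API (fiu_init/fiu_enable/fiu_disable):

#include <fiu.h>
#include <fiu-control.h>

void SimulateMissingCollection() {
    fiu_init(0);
    // Arms the point named in CountTableRequest::OnExecute() above.
    fiu_enable("CountTableRequest.OnExecute.db_not_found", 1, NULL, 0);
    // ... run the request here; it now observes Status(DB_NOT_FOUND, "") ...
    fiu_disable("CountTableRequest.OnExecute.db_not_found");
}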
diff --git a/core/src/server/delivery/request/CreateIndexRequest.cpp b/core/src/server/delivery/request/CreateIndexRequest.cpp
index c800382dbf..891fe96eda 100644
--- a/core/src/server/delivery/request/CreateIndexRequest.cpp
+++ b/core/src/server/delivery/request/CreateIndexRequest.cpp
@@ -25,47 +25,47 @@ namespace milvus {
 namespace server {
 
 CreateIndexRequest::CreateIndexRequest(const std::shared_ptr<Context>& context,
-                                       const std::string& table_name, int64_t index_type,
+                                       const std::string& collection_name, int64_t index_type,
                                        const milvus::json& json_params)
     : BaseRequest(context, BaseRequest::kCreateIndex),
-      table_name_(table_name),
+      collection_name_(collection_name),
       index_type_(index_type),
       json_params_(json_params) {
 }
 
 BaseRequestPtr
-CreateIndexRequest::Create(const std::shared_ptr<Context>& context, const std::string& table_name,
+CreateIndexRequest::Create(const std::shared_ptr<Context>& context, const std::string& collection_name,
                            int64_t index_type, const milvus::json& json_params) {
-    return std::shared_ptr<BaseRequest>(new CreateIndexRequest(context, table_name, index_type, json_params));
+    return std::shared_ptr<BaseRequest>(new CreateIndexRequest(context, collection_name, index_type, json_params));
 }
 
 Status
 CreateIndexRequest::OnExecute() {
     try {
-        std::string hdr = "CreateIndexRequest(table=" + table_name_ + ")";
+        std::string hdr = "CreateIndexRequest(collection=" + collection_name_ + ")";
         TimeRecorderAuto rc(hdr);
 
         // step 1: check arguments
-        auto status = ValidationUtil::ValidateTableName(table_name_);
+        auto status = ValidationUtil::ValidateCollectionName(collection_name_);
         if (!status.ok()) {
             return status;
         }
 
-        // only process root table, ignore partition table
-        engine::meta::TableSchema table_schema;
-        table_schema.table_id_ = table_name_;
+        // only process root collection, ignore partition collection
+        engine::meta::CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_name_;
         status = DBWrapper::DB()->DescribeTable(table_schema);
         fiu_do_on("CreateIndexRequest.OnExecute.not_has_table", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
         fiu_do_on("CreateIndexRequest.OnExecute.throw_std.exception", throw std::exception());
         if (!status.ok()) {
             if (status.code() == DB_NOT_FOUND) {
-                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
+                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
             } else {
                 return status;
             }
         } else {
             if (!table_schema.owner_table_.empty()) {
-                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
+                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
             }
         }
 
@@ -80,8 +80,8 @@ CreateIndexRequest::OnExecute() {
         }
 
         // step 2: binary and float vector support different index/metric type, need to adapt here
-        engine::meta::TableSchema table_info;
-        table_info.table_id_ = table_name_;
+        engine::meta::CollectionSchema table_info;
+        table_info.collection_id_ = collection_name_;
         status = DBWrapper::DB()->DescribeTable(table_info);
 
         int32_t adapter_index_type = index_type_;
@@ -91,7 +91,7 @@ CreateIndexRequest::OnExecute() {
             } else if (adapter_index_type == static_cast<int32_t>(engine::EngineType::FAISS_IVFFLAT)) {
                 adapter_index_type = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IVFFLAT);
             } else {
-                return Status(SERVER_INVALID_INDEX_TYPE, "Invalid index type for table metric type");
+                return Status(SERVER_INVALID_INDEX_TYPE, "Invalid index type for collection metric type");
             }
         }
 
@@ -115,7 +115,7 @@ CreateIndexRequest::OnExecute() {
         engine::TableIndex index;
         index.engine_type_ = adapter_index_type;
         index.extra_params_ = json_params_;
-        status = DBWrapper::DB()->CreateIndex(table_name_, index);
+        status = DBWrapper::DB()->CreateIndex(collection_name_, index);
         fiu_do_on("CreateIndexRequest.OnExecute.create_index_fail",
                   status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
         if (!status.ok()) {
diff --git a/core/src/server/delivery/request/CreateIndexRequest.h b/core/src/server/delivery/request/CreateIndexRequest.h
index efc50bca35..070031f17e 100644
--- a/core/src/server/delivery/request/CreateIndexRequest.h
+++ b/core/src/server/delivery/request/CreateIndexRequest.h
@@ -21,18 +21,18 @@ namespace server {
 class CreateIndexRequest : public BaseRequest {
  public:
     static BaseRequestPtr
-    Create(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t index_type,
-           const milvus::json& json_params);
+    Create(const std::shared_ptr<Context>& context, const std::string& collection_name,
+           int64_t index_type, const milvus::json& json_params);
 
 protected:
-    CreateIndexRequest(const std::shared_ptr<Context>& context, const std::string& table_name,
+    CreateIndexRequest(const std::shared_ptr<Context>& context, const std::string& collection_name,
                        int64_t index_type, const milvus::json& json_params);
 
     Status
     OnExecute() override;
 
 private:
-    const std::string table_name_;
+    const std::string collection_name_;
     const int64_t index_type_;
     milvus::json json_params_;
 };
diff --git a/core/src/server/delivery/request/CreatePartitionRequest.cpp b/core/src/server/delivery/request/CreatePartitionRequest.cpp
index a361c0e8ef..d7fd063fdb 100644
--- a/core/src/server/delivery/request/CreatePartitionRequest.cpp
+++ b/core/src/server/delivery/request/CreatePartitionRequest.cpp
@@ -23,24 +23,24 @@ namespace milvus {
 namespace server {
 
 CreatePartitionRequest::CreatePartitionRequest(const std::shared_ptr<Context>& context,
-                                               const std::string& table_name, const std::string& tag)
-    : BaseRequest(context, BaseRequest::kCreatePartition), table_name_(table_name), tag_(tag) {
+                                               const std::string& collection_name, const std::string& tag)
+    : BaseRequest(context, BaseRequest::kCreatePartition), collection_name_(collection_name), tag_(tag) {
 }
 
 BaseRequestPtr
-CreatePartitionRequest::Create(const std::shared_ptr<Context>& context, const std::string& table_name,
-                               const std::string& tag) {
-    return std::shared_ptr<BaseRequest>(new CreatePartitionRequest(context, table_name, tag));
+CreatePartitionRequest::Create(const std::shared_ptr<Context>& context,
+                               const std::string& collection_name, const std::string& tag) {
+    return std::shared_ptr<BaseRequest>(new CreatePartitionRequest(context, collection_name, tag));
 }
 
 Status
 CreatePartitionRequest::OnExecute() {
-    std::string hdr = "CreatePartitionRequest(table=" + table_name_ + ", partition_tag=" + tag_ + ")";
+    std::string hdr = "CreatePartitionRequest(collection=" + collection_name_ + ", partition_tag=" + tag_ + ")";
     TimeRecorderAuto rc(hdr);
 
     try {
         // step 1: check arguments
-        auto status = ValidationUtil::ValidateTableName(table_name_);
+        auto status = ValidationUtil::ValidateCollectionName(collection_name_);
         fiu_do_on("CreatePartitionRequest.OnExecute.invalid_table_name",
                   status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
         if (!status.ok()) {
@@ -58,28 +58,28 @@ CreatePartitionRequest::OnExecute() {
             return status;
         }
 
-        // only process root table, ignore partition table
-        engine::meta::TableSchema table_schema;
-        table_schema.table_id_ = table_name_;
+        // only process root collection, ignore partition collection
+        engine::meta::CollectionSchema table_schema;
+        table_schema.collection_id_ = collection_name_;
         status = DBWrapper::DB()->DescribeTable(table_schema);
         fiu_do_on("CreatePartitionRequest.OnExecute.invalid_partition_tags",
                   status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
         if (!status.ok()) {
             if (status.code() == DB_NOT_FOUND) {
-                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
+                return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
             } else {
                 return status;
             }
         } else {
             if (!table_schema.owner_table_.empty()) {
-                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
+                return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
             }
         }
 
         rc.RecordSection("check validation");
 
         // step 2: create partition
-        status = DBWrapper::DB()->CreatePartition(table_name_, "", tag_);
+        status = DBWrapper::DB()->CreatePartition(collection_name_, "", tag_);
         fiu_do_on("CreatePartitionRequest.OnExecute.db_already_exist", status = Status(milvus::DB_ALREADY_EXIST, ""));
         fiu_do_on("CreatePartitionRequest.OnExecute.create_partition_fail",
                   status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
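Note: step 2 of CreateIndexRequest::OnExecute() silently swaps a float index type for its binary counterpart when the collection uses a binary metric. The mapping visible in the hunk, as a small helper (the enum values mirror the engine types named in the diff; only this pair of mappings is shown there):

// Rewrites a float index type to its binary equivalent when one exists.
enum class EngineType { FAISS_IDMAP, FAISS_IVFFLAT, FAISS_BIN_IDMAP, FAISS_BIN_IVFFLAT };

bool AdaptForBinaryMetric(EngineType& type) {
    switch (type) {
        case EngineType::FAISS_IDMAP:
            type = EngineType::FAISS_BIN_IDMAP;
            return true;
        case EngineType::FAISS_IVFFLAT:
            type = EngineType::FAISS_BIN_IVFFLAT;
            return true;
        default:
            return false;  // "Invalid index type for collection metric type"
    }
}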
diff --git a/core/src/server/delivery/request/CreatePartitionRequest.h b/core/src/server/delivery/request/CreatePartitionRequest.h
index 286a423f16..c1809ae416 100644
--- a/core/src/server/delivery/request/CreatePartitionRequest.h
+++ b/core/src/server/delivery/request/CreatePartitionRequest.h
@@ -25,14 +25,14 @@ class CreatePartitionRequest : public BaseRequest {
            const std::string& tag);
 
 protected:
-    CreatePartitionRequest(const std::shared_ptr<Context>& context, const std::string& table_name,
+    CreatePartitionRequest(const std::shared_ptr<Context>& context, const std::string& collection_name,
                            const std::string& tag);
 
     Status
     OnExecute() override;
 
 private:
-    const std::string table_name_;
+    const std::string collection_name_;
     const std::string tag_;
 };
diff --git a/core/src/server/delivery/request/CreateTableRequest.cpp b/core/src/server/delivery/request/CreateTableRequest.cpp
index cfcb05f7e6..c4847d6acb 100644
--- a/core/src/server/delivery/request/CreateTableRequest.cpp
+++ b/core/src/server/delivery/request/CreateTableRequest.cpp
@@ -25,30 +25,31 @@ namespace milvus {
 namespace server {
 
 CreateTableRequest::CreateTableRequest(const std::shared_ptr<Context>& context,
-                                       const std::string& table_name, int64_t dimension, int64_t index_file_size,
+                                       const std::string& collection_name, int64_t dimension, int64_t index_file_size,
                                        int64_t metric_type)
     : BaseRequest(context, BaseRequest::kCreateTable),
-      table_name_(table_name),
+      collection_name_(collection_name),
      dimension_(dimension),
       index_file_size_(index_file_size),
       metric_type_(metric_type) {
 }
 
 BaseRequestPtr
-CreateTableRequest::Create(const std::shared_ptr<Context>& context, const std::string& table_name,
+CreateTableRequest::Create(const std::shared_ptr<Context>& context, const std::string& collection_name,
                            int64_t dimension, int64_t index_file_size, int64_t metric_type) {
     return std::shared_ptr<BaseRequest>(
-        new CreateTableRequest(context, table_name, dimension, index_file_size, metric_type));
+        new CreateTableRequest(context, collection_name, dimension, index_file_size, metric_type));
 }
 
 Status
 CreateTableRequest::OnExecute() {
-    std::string hdr = "CreateTableRequest(table=" + table_name_ + ", dimension=" + std::to_string(dimension_) + ")";
+    std::string hdr =
+        "CreateTableRequest(collection=" + collection_name_ + ", dimension=" + std::to_string(dimension_) + ")";
     TimeRecorderAuto rc(hdr);
 
     try {
         // step 1: check arguments
-        auto status = ValidationUtil::ValidateTableName(table_name_);
+        auto status = ValidationUtil::ValidateCollectionName(collection_name_);
         if (!status.ok()) {
             return status;
         }
@@ -72,9 +73,9 @@ CreateTableRequest::OnExecute() {
 
         rc.RecordSection("check validation");
 
-        // step 2: construct table schema
-        engine::meta::TableSchema table_info;
-        table_info.table_id_ = table_name_;
+        // step 2: construct collection schema
+        engine::meta::CollectionSchema table_info;
+        table_info.collection_id_ = collection_name_;
         table_info.dimension_ = static_cast<uint16_t>(dimension_);
         table_info.index_file_size_ = index_file_size_;
         table_info.metric_type_ = metric_type_;
@@ -88,14 +89,14 @@ CreateTableRequest::OnExecute() {
             }
         }
 
-        // step 3: create table
+        // step 3: create collection
         status = DBWrapper::DB()->CreateTable(table_info);
         fiu_do_on("CreateTableRequest.OnExecute.db_already_exist", status = Status(milvus::DB_ALREADY_EXIST, ""));
         fiu_do_on("CreateTableRequest.OnExecute.create_table_fail",
                   status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
         fiu_do_on("CreateTableRequest.OnExecute.throw_std_exception", throw std::exception());
         if (!status.ok()) {
-            // table could exist
+            // collection could exist
             if (status.code() == DB_ALREADY_EXIST) {
                 return Status(SERVER_INVALID_TABLE_NAME, status.message());
             }
diff --git a/core/src/server/delivery/request/CreateTableRequest.h b/core/src/server/delivery/request/CreateTableRequest.h
index 27f01b1165..80c30a6593 100644
--- a/core/src/server/delivery/request/CreateTableRequest.h
+++ b/core/src/server/delivery/request/CreateTableRequest.h
@@ -22,18 +22,18 @@ namespace server {
 class CreateTableRequest : public BaseRequest {
  public:
     static BaseRequestPtr
-    Create(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t dimension,
-           int64_t index_file_size, int64_t metric_type);
+    Create(const std::shared_ptr<Context>& context, const std::string& collection_name,
+           int64_t dimension, int64_t index_file_size, int64_t metric_type);
 
 protected:
-    CreateTableRequest(const std::shared_ptr<Context>& context, const std::string& table_name,
+    CreateTableRequest(const std::shared_ptr<Context>& context, const std::string& collection_name,
                        int64_t dimension, int64_t index_file_size, int64_t metric_type);
 
     Status
     OnExecute() override;
 
 private:
-    const std::string table_name_;
+    const std::string collection_name_;
     int64_t dimension_;
     int64_t index_file_size_;
     int64_t metric_type_;
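Note: CreateTableRequest translates the engine's DB_ALREADY_EXIST into the server-level SERVER_INVALID_TABLE_NAME, so a name collision surfaces to clients as an invalid collection name. A sketch of that mapping with minimal stand-ins for the real Status and error codes:

#include <string>

// Stand-ins; the real milvus::Status carries richer state.
enum Code { OK, DB_ALREADY_EXIST, SERVER_INVALID_TABLE_NAME };
struct Status {
    Code code;
    std::string message;
    bool ok() const { return code == OK; }
};

Status TranslateCreateStatus(const Status& db_status) {
    if (db_status.ok()) {
        return db_status;
    }
    if (db_status.code == DB_ALREADY_EXIST) {  // collection could already exist
        return {SERVER_INVALID_TABLE_NAME, db_status.message};
    }
    return db_status;
}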
!= (int32_t)engine::EngineType::FAISS_IDMAP && table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_BIN_IDMAP && table_schema.engine_type_ != (int32_t)engine::EngineType::HNSW && @@ -84,7 +84,7 @@ DeleteByIDRequest::OnExecute() { rc.RecordSection("check validation"); - status = DBWrapper::DB()->DeleteVectors(table_name_, vector_ids_); + status = DBWrapper::DB()->DeleteVectors(collection_name_, vector_ids_); if (!status.ok()) { return status; } diff --git a/core/src/server/delivery/request/DeleteByIDRequest.h b/core/src/server/delivery/request/DeleteByIDRequest.h index 4f89fed296..2ebadd5f56 100644 --- a/core/src/server/delivery/request/DeleteByIDRequest.h +++ b/core/src/server/delivery/request/DeleteByIDRequest.h @@ -29,18 +29,18 @@ namespace server { class DeleteByIDRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, const std::vector& vector_ids); protected: - DeleteByIDRequest(const std::shared_ptr& context, const std::string& table_name, + DeleteByIDRequest(const std::shared_ptr& context, const std::string& collection_name, const std::vector& vector_ids); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; const std::vector& vector_ids_; }; diff --git a/core/src/server/delivery/request/DescribeIndexRequest.cpp b/core/src/server/delivery/request/DescribeIndexRequest.cpp index 737f8ab0a8..dac10ad7ab 100644 --- a/core/src/server/delivery/request/DescribeIndexRequest.cpp +++ b/core/src/server/delivery/request/DescribeIndexRequest.cpp @@ -22,48 +22,48 @@ namespace milvus { namespace server { DescribeIndexRequest::DescribeIndexRequest(const std::shared_ptr& context, - const std::string& table_name, IndexParam& index_param) - : BaseRequest(context, BaseRequest::kDescribeIndex), table_name_(table_name), index_param_(index_param) { + const std::string& collection_name, IndexParam& index_param) + : BaseRequest(context, BaseRequest::kDescribeIndex), collection_name_(collection_name), index_param_(index_param) { } BaseRequestPtr -DescribeIndexRequest::Create(const std::shared_ptr& context, const std::string& table_name, - IndexParam& index_param) { - return std::shared_ptr(new DescribeIndexRequest(context, table_name, index_param)); +DescribeIndexRequest::Create(const std::shared_ptr& context, + const std::string& collection_name, IndexParam& index_param) { + return std::shared_ptr(new DescribeIndexRequest(context, collection_name, index_param)); } Status DescribeIndexRequest::OnExecute() { try { fiu_do_on("DescribeIndexRequest.OnExecute.throw_std_exception", throw std::exception()); - std::string hdr = "DescribeIndexRequest(table=" + table_name_ + ")"; + std::string hdr = "DescribeIndexRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return 
Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } - // step 2: check table existence + // step 2: check collection existence engine::TableIndex index; - status = DBWrapper::DB()->DescribeIndex(table_name_, index); + status = DBWrapper::DB()->DescribeIndex(collection_name_, index); if (!status.ok()) { return status; } @@ -76,7 +76,7 @@ DescribeIndexRequest::OnExecute() { index.engine_type_ = (int32_t)engine::EngineType::FAISS_IVFFLAT; } - index_param_.table_name_ = table_name_; + index_param_.collection_name_ = collection_name_; index_param_.index_type_ = index.engine_type_; index_param_.extra_params_ = index.extra_params_.dump(); } catch (std::exception& ex) { diff --git a/core/src/server/delivery/request/DescribeIndexRequest.h b/core/src/server/delivery/request/DescribeIndexRequest.h index d4f4c4727c..db0eb47935 100644 --- a/core/src/server/delivery/request/DescribeIndexRequest.h +++ b/core/src/server/delivery/request/DescribeIndexRequest.h @@ -22,18 +22,18 @@ namespace server { class DescribeIndexRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, IndexParam& index_param); protected: - DescribeIndexRequest(const std::shared_ptr& context, const std::string& table_name, + DescribeIndexRequest(const std::shared_ptr& context, const std::string& collection_name, IndexParam& index_param); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; IndexParam& index_param_; }; diff --git a/core/src/server/delivery/request/DescribeTableRequest.cpp b/core/src/server/delivery/request/DescribeTableRequest.cpp index edbcac846c..2e765da343 100644 --- a/core/src/server/delivery/request/DescribeTableRequest.cpp +++ b/core/src/server/delivery/request/DescribeTableRequest.cpp @@ -22,49 +22,49 @@ namespace milvus { namespace server { DescribeTableRequest::DescribeTableRequest(const std::shared_ptr& context, - const std::string& table_name, TableSchema& schema) - : BaseRequest(context, BaseRequest::kDescribeTable), table_name_(table_name), schema_(schema) { + const std::string& collection_name, CollectionSchema& schema) + : BaseRequest(context, BaseRequest::kDescribeTable), collection_name_(collection_name), schema_(schema) { } BaseRequestPtr -DescribeTableRequest::Create(const std::shared_ptr& context, const std::string& table_name, - TableSchema& schema) { - return std::shared_ptr(new DescribeTableRequest(context, table_name, schema)); +DescribeTableRequest::Create(const std::shared_ptr& context, + const std::string& collection_name, CollectionSchema& schema) { + return std::shared_ptr(new DescribeTableRequest(context, collection_name, schema)); } Status DescribeTableRequest::OnExecute() { - std::string hdr = "DescribeTableRequest(table=" + table_name_ + ")"; + std::string hdr = "DescribeTableRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); try { // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // step 2: 
get table info - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 2: get collection info + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); fiu_do_on("DescribeTableRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); fiu_do_on("DescribeTableRequest.OnExecute.throw_std_exception", throw std::exception()); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } - schema_.table_name_ = table_schema.table_id_; + schema_.collection_name_ = table_schema.collection_id_; schema_.dimension_ = static_cast(table_schema.dimension_); schema_.index_file_size_ = table_schema.index_file_size_; schema_.metric_type_ = table_schema.metric_type_; diff --git a/core/src/server/delivery/request/DescribeTableRequest.h b/core/src/server/delivery/request/DescribeTableRequest.h index 45c3e6bd1e..36534d9062 100644 --- a/core/src/server/delivery/request/DescribeTableRequest.h +++ b/core/src/server/delivery/request/DescribeTableRequest.h @@ -22,18 +22,19 @@ namespace server { class DescribeTableRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, TableSchema& schema); + Create(const std::shared_ptr& context, const std::string& collection_name, + CollectionSchema& schema); protected: - DescribeTableRequest(const std::shared_ptr& context, const std::string& table_name, - TableSchema& schema); + DescribeTableRequest(const std::shared_ptr& context, const std::string& collection_name, + CollectionSchema& schema); Status OnExecute() override; private: - const std::string table_name_; - TableSchema& schema_; + const std::string collection_name_; + CollectionSchema& schema_; }; } // namespace server diff --git a/core/src/server/delivery/request/DropIndexRequest.cpp b/core/src/server/delivery/request/DropIndexRequest.cpp index fd3e0273f3..8523588078 100644 --- a/core/src/server/delivery/request/DropIndexRequest.cpp +++ b/core/src/server/delivery/request/DropIndexRequest.cpp @@ -22,49 +22,49 @@ namespace milvus { namespace server { DropIndexRequest::DropIndexRequest(const std::shared_ptr& context, - const std::string& table_name) - : BaseRequest(context, BaseRequest::kDropIndex), table_name_(table_name) { + const std::string& collection_name) + : BaseRequest(context, BaseRequest::kDropIndex), collection_name_(collection_name) { } BaseRequestPtr -DropIndexRequest::Create(const std::shared_ptr& context, const std::string& table_name) { - return std::shared_ptr(new DropIndexRequest(context, table_name)); +DropIndexRequest::Create(const std::shared_ptr& context, const std::string& collection_name) { + return std::shared_ptr(new DropIndexRequest(context, collection_name)); } Status DropIndexRequest::OnExecute() { try { fiu_do_on("DropIndexRequest.OnExecute.throw_std_exception", throw std::exception()); - std::string hdr = "DropIndexRequest(table=" + table_name_ + ")"; + std::string 
hdr = "DropIndexRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); fiu_do_on("DropIndexRequest.OnExecute.table_not_exist", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } rc.RecordSection("check validation"); // step 2: drop index - status = DBWrapper::DB()->DropIndex(table_name_); + status = DBWrapper::DB()->DropIndex(collection_name_); fiu_do_on("DropIndexRequest.OnExecute.drop_index_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { return status; diff --git a/core/src/server/delivery/request/DropIndexRequest.h b/core/src/server/delivery/request/DropIndexRequest.h index c79bba3baa..f604378667 100644 --- a/core/src/server/delivery/request/DropIndexRequest.h +++ b/core/src/server/delivery/request/DropIndexRequest.h @@ -22,16 +22,16 @@ namespace server { class DropIndexRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name); + Create(const std::shared_ptr& context, const std::string& collection_name); protected: - DropIndexRequest(const std::shared_ptr& context, const std::string& table_name); + DropIndexRequest(const std::shared_ptr& context, const std::string& collection_name); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; }; } // namespace server diff --git a/core/src/server/delivery/request/DropPartitionRequest.cpp b/core/src/server/delivery/request/DropPartitionRequest.cpp index c8f99693d1..672547c293 100644 --- a/core/src/server/delivery/request/DropPartitionRequest.cpp +++ b/core/src/server/delivery/request/DropPartitionRequest.cpp @@ -23,26 +23,26 @@ namespace milvus { namespace server { DropPartitionRequest::DropPartitionRequest(const std::shared_ptr& context, - const std::string& table_name, const std::string& tag) - : BaseRequest(context, BaseRequest::kDropPartition), table_name_(table_name), tag_(tag) { + const std::string& collection_name, const std::string& tag) + : BaseRequest(context, BaseRequest::kDropPartition), collection_name_(collection_name), tag_(tag) { } BaseRequestPtr -DropPartitionRequest::Create(const std::shared_ptr& context, const std::string& table_name, - const std::string& tag) { - return std::shared_ptr(new DropPartitionRequest(context, table_name, tag)); +DropPartitionRequest::Create(const std::shared_ptr& context, + const std::string& collection_name, const std::string& tag) { + return std::shared_ptr(new DropPartitionRequest(context, collection_name, tag)); } Status 
DropPartitionRequest::OnExecute() { - std::string hdr = "DropPartitionRequest(table=" + table_name_ + ", partition_tag=" + tag_ + ")"; + std::string hdr = "DropPartitionRequest(collection=" + collection_name_ + ", partition_tag=" + tag_ + ")"; TimeRecorderAuto rc(hdr); - std::string table_name = table_name_; + std::string collection_name = collection_name_; std::string partition_tag = tag_; - // step 1: check table name - auto status = ValidationUtil::ValidateTableName(table_name); + // step 1: check collection name + auto status = ValidationUtil::ValidateCollectionName(collection_name); fiu_do_on("DropPartitionRequest.OnExecute.invalid_table_name", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { @@ -61,27 +61,27 @@ DropPartitionRequest::OnExecute() { return status; } - // step 3: check table - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 3: check collection + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } rc.RecordSection("check validation"); // step 4: drop partition - return DBWrapper::DB()->DropPartitionByTag(table_name, partition_tag); + return DBWrapper::DB()->DropPartitionByTag(collection_name, partition_tag); } } // namespace server diff --git a/core/src/server/delivery/request/DropPartitionRequest.h b/core/src/server/delivery/request/DropPartitionRequest.h index f82e84cbd7..0e74fa208d 100644 --- a/core/src/server/delivery/request/DropPartitionRequest.h +++ b/core/src/server/delivery/request/DropPartitionRequest.h @@ -21,18 +21,18 @@ namespace server { class DropPartitionRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, const std::string& tag); protected: - DropPartitionRequest(const std::shared_ptr& context, const std::string& table_name, + DropPartitionRequest(const std::shared_ptr& context, const std::string& collection_name, const std::string& tag); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; const std::string tag_; }; diff --git a/core/src/server/delivery/request/DropTableRequest.cpp b/core/src/server/delivery/request/DropTableRequest.cpp index 86eabd868c..01a4c299c9 100644 --- a/core/src/server/delivery/request/DropTableRequest.cpp +++ b/core/src/server/delivery/request/DropTableRequest.cpp @@ -23,31 +23,31 @@ namespace milvus { namespace server { DropTableRequest::DropTableRequest(const std::shared_ptr& context, - const std::string& table_name) - : BaseRequest(context, BaseRequest::kDropTable), table_name_(table_name) { + const std::string& collection_name) + : BaseRequest(context, BaseRequest::kDropTable), collection_name_(collection_name) { } BaseRequestPtr -DropTableRequest::Create(const std::shared_ptr& context, const std::string& 
table_name) { - return std::shared_ptr(new DropTableRequest(context, table_name)); +DropTableRequest::Create(const std::shared_ptr& context, const std::string& collection_name) { + return std::shared_ptr(new DropTableRequest(context, collection_name)); } Status DropTableRequest::OnExecute() { try { - std::string hdr = "DropTableRequest(table=" + table_name_ + ")"; + std::string hdr = "DropTableRequest(collection=" + collection_name_ + ")"; TimeRecorder rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // step 2: check table existence - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 2: check collection existence + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); fiu_do_on("DropTableRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, "")); fiu_do_on("DropTableRequest.OnExecute.describe_table_fail", @@ -55,20 +55,20 @@ DropTableRequest::OnExecute() { fiu_do_on("DropTableRequest.OnExecute.throw_std_exception", throw std::exception()); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } rc.RecordSection("check validation"); - // step 3: Drop table - status = DBWrapper::DB()->DropTable(table_name_); + // step 3: Drop collection + status = DBWrapper::DB()->DropTable(collection_name_); fiu_do_on("DropTableRequest.OnExecute.drop_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { return status; diff --git a/core/src/server/delivery/request/DropTableRequest.h b/core/src/server/delivery/request/DropTableRequest.h index 7ef2b3cfd2..5bb8964df5 100644 --- a/core/src/server/delivery/request/DropTableRequest.h +++ b/core/src/server/delivery/request/DropTableRequest.h @@ -22,16 +22,16 @@ namespace server { class DropTableRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name); + Create(const std::shared_ptr& context, const std::string& collection_name); protected: - DropTableRequest(const std::shared_ptr& context, const std::string& table_name); + DropTableRequest(const std::shared_ptr& context, const std::string& collection_name); Status OnExecute() override; private: - std::string table_name_; + std::string collection_name_; }; } // namespace server diff --git a/core/src/server/delivery/request/FlushRequest.cpp b/core/src/server/delivery/request/FlushRequest.cpp index d0560fae7f..4fdf217c0e 100644 --- a/core/src/server/delivery/request/FlushRequest.cpp +++ b/core/src/server/delivery/request/FlushRequest.cpp @@ -27,20 +27,20 @@ namespace milvus { namespace server { FlushRequest::FlushRequest(const std::shared_ptr& context, - const std::vector& table_names) - : BaseRequest(context, BaseRequest::kFlush), table_names_(table_names) { + const std::vector& collection_names) + : 
BaseRequest(context, BaseRequest::kFlush), collection_names_(collection_names) { } BaseRequestPtr FlushRequest::Create(const std::shared_ptr& context, - const std::vector& table_names) { - return std::shared_ptr(new FlushRequest(context, table_names)); + const std::vector& collection_names) { + return std::shared_ptr(new FlushRequest(context, collection_names)); } Status FlushRequest::OnExecute() { std::string hdr = "FlushRequest flush tables: "; - for (auto& name : table_names_) { + for (auto& name : collection_names_) { hdr += name; hdr += ", "; } @@ -49,10 +49,10 @@ FlushRequest::OnExecute() { Status status = Status::OK(); SERVER_LOG_DEBUG << hdr; - for (auto& name : table_names_) { - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = name; + for (auto& name : collection_names_) { + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = name; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { diff --git a/core/src/server/delivery/request/FlushRequest.h b/core/src/server/delivery/request/FlushRequest.h index 5872c41863..7e865a98ca 100644 --- a/core/src/server/delivery/request/FlushRequest.h +++ b/core/src/server/delivery/request/FlushRequest.h @@ -29,16 +29,17 @@ namespace server { class FlushRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::vector& table_names); + Create(const std::shared_ptr& context, const std::vector& collection_names); protected: - FlushRequest(const std::shared_ptr& context, const std::vector& table_names); + FlushRequest(const std::shared_ptr& context, + const std::vector& collection_names); Status OnExecute() override; private: - std::vector table_names_; + std::vector collection_names_; }; } // namespace server diff --git a/core/src/server/delivery/request/GetVectorByIDRequest.cpp b/core/src/server/delivery/request/GetVectorByIDRequest.cpp index b8fda90323..cfd12d0120 100644 --- a/core/src/server/delivery/request/GetVectorByIDRequest.cpp +++ b/core/src/server/delivery/request/GetVectorByIDRequest.cpp @@ -28,25 +28,29 @@ namespace milvus { namespace server { GetVectorByIDRequest::GetVectorByIDRequest(const std::shared_ptr& context, - const std::string& table_name, const std::vector& ids, + const std::string& collection_name, const std::vector& ids, engine::VectorsData& vectors) - : BaseRequest(context, BaseRequest::kGetVectorByID), table_name_(table_name), ids_(ids), vectors_(vectors) { + : BaseRequest(context, BaseRequest::kGetVectorByID), + collection_name_(collection_name), + ids_(ids), + vectors_(vectors) { } BaseRequestPtr -GetVectorByIDRequest::Create(const std::shared_ptr& context, const std::string& table_name, - const std::vector& ids, engine::VectorsData& vectors) { - return std::shared_ptr(new GetVectorByIDRequest(context, table_name, ids, vectors)); +GetVectorByIDRequest::Create(const std::shared_ptr& context, + const std::string& collection_name, const std::vector& ids, + engine::VectorsData& vectors) { + return std::shared_ptr(new GetVectorByIDRequest(context, collection_name, ids, vectors)); } Status GetVectorByIDRequest::OnExecute() { try { - std::string hdr = "GetVectorByIDRequest(table=" + table_name_ + ")"; + std::string hdr = "GetVectorByIDRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = 
ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } @@ -55,24 +59,24 @@ GetVectorByIDRequest::OnExecute() { return Status(SERVER_INVALID_ARGUMENT, "No vector id specified"); } - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } // step 2: get vector data, now only support get one id - return DBWrapper::DB()->GetVectorByID(table_name_, ids_[0], vectors_); + return DBWrapper::DB()->GetVectorByID(collection_name_, ids_[0], vectors_); } catch (std::exception& ex) { return Status(SERVER_UNEXPECTED_ERROR, ex.what()); } diff --git a/core/src/server/delivery/request/GetVectorByIDRequest.h b/core/src/server/delivery/request/GetVectorByIDRequest.h index e10b19965d..c07ae6b12d 100644 --- a/core/src/server/delivery/request/GetVectorByIDRequest.h +++ b/core/src/server/delivery/request/GetVectorByIDRequest.h @@ -29,18 +29,18 @@ namespace server { class GetVectorByIDRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, const std::vector& ids, engine::VectorsData& vectors); protected: - GetVectorByIDRequest(const std::shared_ptr& context, const std::string& table_name, + GetVectorByIDRequest(const std::shared_ptr& context, const std::string& collection_name, const std::vector& ids, engine::VectorsData& vectors); Status OnExecute() override; private: - std::string table_name_; + std::string collection_name_; std::vector ids_; engine::VectorsData& vectors_; }; diff --git a/core/src/server/delivery/request/GetVectorIDsRequest.cpp b/core/src/server/delivery/request/GetVectorIDsRequest.cpp index 7020816640..c397e579ad 100644 --- a/core/src/server/delivery/request/GetVectorIDsRequest.cpp +++ b/core/src/server/delivery/request/GetVectorIDsRequest.cpp @@ -28,51 +28,51 @@ namespace milvus { namespace server { GetVectorIDsRequest::GetVectorIDsRequest(const std::shared_ptr& context, - const std::string& table_name, const std::string& segment_name, + const std::string& collection_name, const std::string& segment_name, std::vector& vector_ids) : BaseRequest(context, BaseRequest::kGetVectorIDs), - table_name_(table_name), + collection_name_(collection_name), segment_name_(segment_name), vector_ids_(vector_ids) { } BaseRequestPtr -GetVectorIDsRequest::Create(const std::shared_ptr& context, const std::string& table_name, +GetVectorIDsRequest::Create(const std::shared_ptr& context, const std::string& collection_name, const std::string& segment_name, std::vector& vector_ids) { - return std::shared_ptr(new GetVectorIDsRequest(context, table_name, segment_name, vector_ids)); + return std::shared_ptr(new GetVectorIDsRequest(context, 
collection_name, segment_name, vector_ids)); } Status GetVectorIDsRequest::OnExecute() { try { - std::string hdr = "GetVectorIDsRequest(table=" + table_name_ + " segment=" + segment_name_ + ")"; + std::string hdr = "GetVectorIDsRequest(collection=" + collection_name_ + " segment=" + segment_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } // step 2: get vector data, now only support get one id vector_ids_.clear(); - return DBWrapper::DB()->GetVectorIDs(table_name_, segment_name_, vector_ids_); + return DBWrapper::DB()->GetVectorIDs(collection_name_, segment_name_, vector_ids_); } catch (std::exception& ex) { return Status(SERVER_UNEXPECTED_ERROR, ex.what()); } diff --git a/core/src/server/delivery/request/GetVectorIDsRequest.h b/core/src/server/delivery/request/GetVectorIDsRequest.h index bea8ad3ec3..3a87587d06 100644 --- a/core/src/server/delivery/request/GetVectorIDsRequest.h +++ b/core/src/server/delivery/request/GetVectorIDsRequest.h @@ -29,18 +29,18 @@ namespace server { class GetVectorIDsRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, const std::string& segment_name, std::vector& vector_ids); protected: - GetVectorIDsRequest(const std::shared_ptr& context, const std::string& table_name, + GetVectorIDsRequest(const std::shared_ptr& context, const std::string& collection_name, const std::string& segment_name, std::vector& vector_ids); Status OnExecute() override; private: - std::string table_name_; + std::string collection_name_; std::string segment_name_; std::vector& vector_ids_; }; diff --git a/core/src/server/delivery/request/HasTableRequest.cpp b/core/src/server/delivery/request/HasTableRequest.cpp index cde5f9d3f4..d727462529 100644 --- a/core/src/server/delivery/request/HasTableRequest.cpp +++ b/core/src/server/delivery/request/HasTableRequest.cpp @@ -21,37 +21,37 @@ namespace milvus { namespace server { -HasTableRequest::HasTableRequest(const std::shared_ptr& context, const std::string& table_name, - bool& has_table) - : BaseRequest(context, BaseRequest::kHasTable), table_name_(table_name), has_table_(has_table) { +HasTableRequest::HasTableRequest(const std::shared_ptr& context, + const std::string& collection_name, bool& has_table) + : BaseRequest(context, BaseRequest::kHasTable), collection_name_(collection_name), has_table_(has_table) { } BaseRequestPtr -HasTableRequest::Create(const std::shared_ptr& context, const std::string& table_name, 
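
Each request class in this change follows the same factory shape: a protected constructor, a static Create() returning BaseRequestPtr, and results delivered through reference members bound at construction. A short usage sketch — hypothetical; Execute() is assumed here as the BaseRequest entry point that drives OnExecute(), and "demo_collection" is an illustrative name:

// Illustration only: how a caller receives a result through the bound reference.
bool has_collection = false;
BaseRequestPtr request = HasTableRequest::Create(context, "demo_collection", has_collection);
auto status = request->Execute();  // assumed scheduling entry point
if (status.ok() && has_collection) {
    // "demo_collection" exists and is a root collection (owner_table_ is empty).
}

Binding the output by reference keeps the request object itself payload-free; the transport layer (gRPC/HTTP) reads the caller's variable once the scheduler has run the request.
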
+HasTableRequest::Create(const std::shared_ptr& context, const std::string& collection_name, bool& has_table) { - return std::shared_ptr(new HasTableRequest(context, table_name, has_table)); + return std::shared_ptr(new HasTableRequest(context, collection_name, has_table)); } Status HasTableRequest::OnExecute() { try { - std::string hdr = "HasTableRequest(table=" + table_name_ + ")"; + std::string hdr = "HasTableRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } // step 2: check table existence - status = DBWrapper::DB()->HasNativeTable(table_name_, has_table_); + status = DBWrapper::DB()->HasNativeTable(collection_name_, has_table_); fiu_do_on("HasTableRequest.OnExecute.throw_std_exception", throw std::exception()); - // only process root table, ignore partition table + // only process root collection, ignore partition collection if (has_table_) { - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!table_schema.owner_table_.empty()) { has_table_ = false; diff --git a/core/src/server/delivery/request/HasTableRequest.h b/core/src/server/delivery/request/HasTableRequest.h index 117f7d3087..7d2330d499 100644 --- a/core/src/server/delivery/request/HasTableRequest.h +++ b/core/src/server/delivery/request/HasTableRequest.h @@ -22,17 +22,18 @@ namespace server { class HasTableRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, bool& has_table); + Create(const std::shared_ptr& context, const std::string& collection_name, + bool& has_table); protected: - HasTableRequest(const std::shared_ptr& context, const std::string& table_name, + HasTableRequest(const std::shared_ptr& context, const std::string& collection_name, bool& has_table); Status OnExecute() override; private: - std::string table_name_; + std::string collection_name_; bool& has_table_; }; diff --git a/core/src/server/delivery/request/InsertRequest.cpp b/core/src/server/delivery/request/InsertRequest.cpp index d48b37f6cd..ca16899e96 100644 --- a/core/src/server/delivery/request/InsertRequest.cpp +++ b/core/src/server/delivery/request/InsertRequest.cpp @@ -21,6 +21,7 @@ #include #include #include + #ifdef MILVUS_ENABLE_PROFILING #include #endif @@ -28,18 +29,19 @@ namespace milvus { namespace server { -InsertRequest::InsertRequest(const std::shared_ptr& context, const std::string& table_name, - engine::VectorsData& vectors, const std::string& partition_tag) +InsertRequest::InsertRequest(const std::shared_ptr& context, + const std::string& collection_name, engine::VectorsData& vectors, + const std::string& partition_tag) : BaseRequest(context, BaseRequest::kInsert), - table_name_(table_name), + collection_name_(collection_name), vectors_data_(vectors), partition_tag_(partition_tag) { } BaseRequestPtr -InsertRequest::Create(const std::shared_ptr& context, const std::string& table_name, +InsertRequest::Create(const std::shared_ptr& context, const std::string& collection_name, engine::VectorsData& vectors, const std::string& partition_tag) { - return std::shared_ptr(new InsertRequest(context, table_name, vectors, partition_tag)); + return std::shared_ptr(new 
InsertRequest(context, collection_name, vectors, partition_tag)); } Status @@ -47,12 +49,12 @@ InsertRequest::OnExecute() { try { int64_t vector_count = vectors_data_.vector_count_; fiu_do_on("InsertRequest.OnExecute.throw_std_exception", throw std::exception()); - std::string hdr = "InsertRequest(table=" + table_name_ + ", n=" + std::to_string(vector_count) + + std::string hdr = "InsertRequest(collection=" + collection_name_ + ", n=" + std::to_string(vector_count) + ", partition_tag=" + partition_tag_ + ")"; TimeRecorder rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } @@ -69,26 +71,26 @@ InsertRequest::OnExecute() { } } - // step 2: check table existence - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 2: check collection existence + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); fiu_do_on("InsertRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, "")); fiu_do_on("InsertRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } - // step 3: check table flag + // step 3: check collection flag // all user provide id, or all internal id bool user_provide_ids = !vectors_data_.id_array_.empty(); fiu_do_on("InsertRequest.OnExecute.illegal_vector_id", user_provide_ids = false; @@ -96,7 +98,7 @@ InsertRequest::OnExecute() { // user already provided id before, all insert action require user id if ((table_schema.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) { return Status(SERVER_ILLEGAL_VECTOR_ID, - "Table vector IDs are user-defined. Please provide IDs for all vectors of this table."); + "Entity IDs are user-defined. Please provide IDs for all entities of the collection."); } fiu_do_on("InsertRequest.OnExecute.illegal_vector_id2", user_provide_ids = true; @@ -105,7 +107,7 @@ InsertRequest::OnExecute() { if ((table_schema.flag_ & engine::meta::FLAG_MASK_NO_USERID) != 0 && user_provide_ids) { return Status( SERVER_ILLEGAL_VECTOR_ID, - "Table vector IDs are auto-generated. All vectors of this table must use auto-generated IDs."); + "Entity IDs are auto-generated. All entities of this collection must use auto-generated IDs."); } rc.RecordSection("check validation"); @@ -117,34 +119,34 @@ InsertRequest::OnExecute() { // step 4: some metric type doesn't support float vectors if (!vectors_data_.float_data_.empty()) { // insert float vectors if (engine::utils::IsBinaryMetricType(table_schema.metric_type_)) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Table metric type doesn't support float vectors."); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Collection metric type doesn't support float vectors."); } // check prepared float data if (vectors_data_.float_data_.size() % vector_count != 0) { return Status(SERVER_INVALID_ROWRECORD_ARRAY, - "The vector dimension must be equal to the table dimension."); + "The vector dimension must be equal to the collection dimension."); } fiu_do_on("InsertRequest.OnExecute.invalid_dim", table_schema.dimension_ = -1); if (vectors_data_.float_data_.size() / vector_count != table_schema.dimension_) { return Status(SERVER_INVALID_VECTOR_DIMENSION, - "The vector dimension must be equal to the table dimension."); + "The vector dimension must be equal to the collection dimension."); } } else if (!vectors_data_.binary_data_.empty()) { // insert binary vectors if (!engine::utils::IsBinaryMetricType(table_schema.metric_type_)) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Table metric type doesn't support binary vectors."); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Collection metric type doesn't support binary vectors."); } // check prepared binary data if (vectors_data_.binary_data_.size() % vector_count != 0) { return Status(SERVER_INVALID_ROWRECORD_ARRAY, - "The vector dimension must be equal to the table dimension."); + "The vector dimension must be equal to the collection dimension."); } if (vectors_data_.binary_data_.size() * 8 / vector_count != table_schema.dimension_) { return Status(SERVER_INVALID_VECTOR_DIMENSION, - "The vector dimension must be equal to the table dimension."); + "The vector dimension must be equal to the collection dimension."); } } @@ -152,7 +154,7 @@ InsertRequest::OnExecute() { auto vec_count = static_cast(vector_count); rc.RecordSection("prepare vectors data"); - status = DBWrapper::DB()->InsertVectors(table_name_, partition_tag_, vectors_data_); + status = DBWrapper::DB()->InsertVectors(collection_name_, partition_tag_, vectors_data_); fiu_do_on("InsertRequest.OnExecute.insert_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { return status; @@ -166,10 +168,10 @@ InsertRequest::OnExecute() { return Status(SERVER_ILLEGAL_VECTOR_ID, msg); } - // step 6: update table flag + // step 6: update collection flag user_provide_ids ? 
table_schema.flag_ |= engine::meta::FLAG_MASK_HAS_USERID : table_schema.flag_ |= engine::meta::FLAG_MASK_NO_USERID; - status = DBWrapper::DB()->UpdateTableFlag(table_name_, table_schema.flag_); + status = DBWrapper::DB()->UpdateTableFlag(collection_name_, table_schema.flag_); #ifdef MILVUS_ENABLE_PROFILING ProfilerStop(); diff --git a/core/src/server/delivery/request/InsertRequest.h b/core/src/server/delivery/request/InsertRequest.h index 0cfed97b9f..2110e9f1db 100644 --- a/core/src/server/delivery/request/InsertRequest.h +++ b/core/src/server/delivery/request/InsertRequest.h @@ -23,18 +23,18 @@ namespace server { class InsertRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, engine::VectorsData& vectors, const std::string& partition_tag); protected: - InsertRequest(const std::shared_ptr& context, const std::string& table_name, + InsertRequest(const std::shared_ptr& context, const std::string& collection_name, engine::VectorsData& vectors, const std::string& partition_tag); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; engine::VectorsData& vectors_data_; const std::string partition_tag_; }; diff --git a/core/src/server/delivery/request/PreloadTableRequest.cpp b/core/src/server/delivery/request/PreloadTableRequest.cpp index 4648a8faea..b37c1b3198 100644 --- a/core/src/server/delivery/request/PreloadTableRequest.cpp +++ b/core/src/server/delivery/request/PreloadTableRequest.cpp @@ -22,45 +22,46 @@ namespace milvus { namespace server { PreloadTableRequest::PreloadTableRequest(const std::shared_ptr& context, - const std::string& table_name) - : BaseRequest(context, BaseRequest::kPreloadTable), table_name_(table_name) { + const std::string& collection_name) + : BaseRequest(context, BaseRequest::kPreloadTable), collection_name_(collection_name) { } BaseRequestPtr -PreloadTableRequest::Create(const std::shared_ptr& context, const std::string& table_name) { - return std::shared_ptr(new PreloadTableRequest(context, table_name)); +PreloadTableRequest::Create(const std::shared_ptr& context, + const std::string& collection_name) { + return std::shared_ptr(new PreloadTableRequest(context, collection_name)); } Status PreloadTableRequest::OnExecute() { try { - std::string hdr = "PreloadTableRequest(table=" + table_name_ + ")"; + std::string hdr = "PreloadTableRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check arguments - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, 
TableNotExistMsg(collection_name_)); } } - // step 2: check table existence - status = DBWrapper::DB()->PreloadTable(table_name_); + // step 2: preload collection + status = DBWrapper::DB()->PreloadTable(collection_name_); fiu_do_on("PreloadTableRequest.OnExecute.preload_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); fiu_do_on("PreloadTableRequest.OnExecute.throw_std_exception", throw std::exception()); diff --git a/core/src/server/delivery/request/PreloadTableRequest.h b/core/src/server/delivery/request/PreloadTableRequest.h index abbc78581f..76cb5bf639 100644 --- a/core/src/server/delivery/request/PreloadTableRequest.h +++ b/core/src/server/delivery/request/PreloadTableRequest.h @@ -22,16 +22,16 @@ namespace server { class PreloadTableRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name); + Create(const std::shared_ptr& context, const std::string& collection_name); protected: - PreloadTableRequest(const std::shared_ptr& context, const std::string& table_name); + PreloadTableRequest(const std::shared_ptr& context, const std::string& collection_name); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; }; } // namespace server diff --git a/core/src/server/delivery/request/SearchByIDRequest.cpp b/core/src/server/delivery/request/SearchByIDRequest.cpp index e53d984fa0..8ab4a9f04f 100644 --- a/core/src/server/delivery/request/SearchByIDRequest.cpp +++ b/core/src/server/delivery/request/SearchByIDRequest.cpp @@ -34,11 +34,11 @@ namespace milvus { namespace server { SearchByIDRequest::SearchByIDRequest(const std::shared_ptr& context, - const std::string& table_name, int64_t vector_id, int64_t topk, + const std::string& collection_name, int64_t vector_id, int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, TopKQueryResult& result) : BaseRequest(context, BaseRequest::kSearchByID), - table_name_(table_name), + collection_name_(collection_name), vector_id_(vector_id), topk_(topk), extra_params_(extra_params), @@ -47,11 +47,11 @@ SearchByIDRequest::SearchByIDRequest } BaseRequestPtr -SearchByIDRequest::Create(const std::shared_ptr& context, const std::string& table_name, +SearchByIDRequest::Create(const std::shared_ptr& context, const std::string& collection_name, int64_t vector_id, int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, TopKQueryResult& result) { return std::shared_ptr( - new SearchByIDRequest(context, table_name, vector_id, topk, extra_params, partition_list, result)); + new SearchByIDRequest(context, collection_name, vector_id, topk, extra_params, partition_list, result)); } Status @@ -59,15 +59,15 @@ SearchByIDRequest::OnExecute() { try { auto pre_query_ctx = context_->Child("Pre query"); - std::string hdr = "SearchByIDRequest(table=" + table_name_ + ", id=" + std::to_string(vector_id_) + + std::string hdr = "SearchByIDRequest(collection=" + collection_name_ + ", id=" + std::to_string(vector_id_) + ", k=" + std::to_string(topk_) + ", extra_params=" + extra_params_.dump() + ")"; TimeRecorder rc(hdr); // step 1: check empty id - // step 2: check table name - auto status = ValidationUtil::ValidateTableName(table_name_); + // step 2: check collection name - auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } @@ -78,20 +78,20 @@ SearchByIDRequest::OnExecute() { return status; } - // step 4: check table existence - // only process root table, ignore
partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 4: check collection existence + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } @@ -117,7 +117,7 @@ SearchByIDRequest::OnExecute() { } #endif - // step 7: check table's index type supports search by id + // step 7: check collection's index type supports search by id if (table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IDMAP && table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_BIN_IDMAP && table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IVFFLAT && @@ -142,7 +142,7 @@ SearchByIDRequest::OnExecute() { pre_query_ctx->GetTraceContext()->GetSpan()->Finish(); - status = DBWrapper::DB()->QueryByID(context_, table_name_, partition_list_, (size_t)topk_, extra_params_, + status = DBWrapper::DB()->QueryByID(context_, collection_name_, partition_list_, (size_t)topk_, extra_params_, vector_id_, result_ids, result_distances); #ifdef MILVUS_ENABLE_PROFILING @@ -155,7 +155,7 @@ SearchByIDRequest::OnExecute() { } if (result_ids.empty()) { - return Status::OK(); // empty table + return Status::OK(); // empty collection } auto post_query_ctx = context_->Child("Constructing result"); diff --git a/core/src/server/delivery/request/SearchByIDRequest.h b/core/src/server/delivery/request/SearchByIDRequest.h index 8c1fe9ac52..b346873bef 100644 --- a/core/src/server/delivery/request/SearchByIDRequest.h +++ b/core/src/server/delivery/request/SearchByIDRequest.h @@ -29,12 +29,12 @@ namespace server { class SearchByIDRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, int64_t vector_id, - int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, - TopKQueryResult& result); + Create(const std::shared_ptr& context, const std::string& collection_name, + int64_t vector_id, int64_t topk, const milvus::json& extra_params, + const std::vector& partition_list, TopKQueryResult& result); protected: - SearchByIDRequest(const std::shared_ptr& context, const std::string& table_name, + SearchByIDRequest(const std::shared_ptr& context, const std::string& collection_name, int64_t vector_id, int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, TopKQueryResult& result); @@ -42,7 +42,7 @@ class SearchByIDRequest : public BaseRequest { OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; const int64_t vector_id_; int64_t topk_; milvus::json extra_params_; diff --git a/core/src/server/delivery/request/SearchCombineRequest.cpp b/core/src/server/delivery/request/SearchCombineRequest.cpp index c3affb9ae6..1bf059f4ca 100644 --- a/core/src/server/delivery/request/SearchCombineRequest.cpp +++ b/core/src/server/delivery/request/SearchCombineRequest.cpp @@ -106,7 +106,7 @@ SearchCombineRequest::Combine(const 
SearchRequestPtr& request) { // reset some parameters in necessary if (request_list_.empty()) { // validate first request input - auto status = ValidationUtil::ValidateTableName(request->TableName()); + auto status = ValidationUtil::ValidateCollectionName(request->CollectionName()); if (!status.ok()) { return status; } @@ -117,7 +117,7 @@ SearchCombineRequest::Combine(const SearchRequestPtr& request) { } // assign base parameters - table_name_ = request->TableName(); + collection_name_ = request->CollectionName(); min_topk_ = request->TopK() - MAX_TOPK_GAP / 2; if (min_topk_ < 0) { min_topk_ = 0; @@ -138,7 +138,7 @@ SearchCombineRequest::Combine(const SearchRequestPtr& request) { bool SearchCombineRequest::CanCombine(const SearchRequestPtr& request) { - if (table_name_ != request->TableName()) { + if (collection_name_ != request->CollectionName()) { return false; } @@ -170,7 +170,7 @@ SearchCombineRequest::CanCombine(const SearchRequestPtr& request) { bool SearchCombineRequest::CanCombine(const SearchRequestPtr& left, const SearchRequestPtr& right) { - if (left->TableName() != right->TableName()) { + if (left->CollectionName() != right->CollectionName()) { return false; } @@ -226,18 +226,19 @@ SearchCombineRequest::OnExecute() { size_t combined_request = request_list_.size(); SERVER_LOG_DEBUG << "SearchCombineRequest execute, request count=" << combined_request << ", extra_params=" << extra_params_.dump(); - std::string hdr = "SearchCombineRequest(table=" + table_name_ + ")"; + std::string hdr = "SearchCombineRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); // step 1: check table existence // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; auto status = DBWrapper::DB()->DescribeTable(table_schema); + if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - status = Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + status = Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); FreeRequests(status); return status; } else { @@ -246,7 +247,7 @@ SearchCombineRequest::OnExecute() { } } else { if (!table_schema.owner_table_.empty()) { - status = Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + status = Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); FreeRequests(status); return status; } @@ -352,7 +353,7 @@ SearchCombineRequest::OnExecute() { context_list.CreateChild(request_list_, "Combine Query"); if (file_id_list_.empty()) { - status = DBWrapper::DB()->Query(nullptr, table_name_, partition_list, (size_t)search_topk_, + status = DBWrapper::DB()->Query(nullptr, collection_name_, partition_list, (size_t)search_topk_, extra_params_, vectors_data_, result_ids, result_distances); } else { status = DBWrapper::DB()->QueryByFileID(nullptr, file_id_list, (size_t)search_topk_, extra_params_, diff --git a/core/src/server/delivery/request/SearchCombineRequest.h b/core/src/server/delivery/request/SearchCombineRequest.h index 7cea09b1f0..3aa24bb928 100644 --- a/core/src/server/delivery/request/SearchCombineRequest.h +++ b/core/src/server/delivery/request/SearchCombineRequest.h @@ -44,7 +44,7 @@ class SearchCombineRequest : public BaseRequest { FreeRequests(const Status& status); private: - std::string table_name_; + std::string collection_name_; engine::VectorsData vectors_data_; int64_t min_topk_ = 0; int64_t search_topk_ = 0; diff 
--git a/core/src/server/delivery/request/SearchRequest.cpp b/core/src/server/delivery/request/SearchRequest.cpp index f433bb4d88..d4a5f0475d 100644 --- a/core/src/server/delivery/request/SearchRequest.cpp +++ b/core/src/server/delivery/request/SearchRequest.cpp @@ -27,12 +27,12 @@ namespace milvus { namespace server { -SearchRequest::SearchRequest(const std::shared_ptr& context, const std::string& table_name, - const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params, - const std::vector& partition_list, +SearchRequest::SearchRequest(const std::shared_ptr& context, + const std::string& collection_name, const engine::VectorsData& vectors, int64_t topk, + const milvus::json& extra_params, const std::vector& partition_list, const std::vector& file_id_list, TopKQueryResult& result) : BaseRequest(context, BaseRequest::kSearch), - table_name_(table_name), + collection_name_(collection_name), vectors_data_(vectors), topk_(topk), extra_params_(extra_params), @@ -42,22 +42,22 @@ SearchRequest::SearchRequest(const std::shared_ptr& con } BaseRequestPtr -SearchRequest::Create(const std::shared_ptr& context, const std::string& table_name, +SearchRequest::Create(const std::shared_ptr& context, const std::string& collection_name, const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, const std::vector& file_id_list, TopKQueryResult& result) { return std::shared_ptr( - new SearchRequest(context, table_name, vectors, topk, extra_params, partition_list, file_id_list, result)); + new SearchRequest(context, collection_name, vectors, topk, extra_params, partition_list, file_id_list, result)); } Status SearchRequest::OnPreExecute() { - std::string hdr = "SearchRequest pre-execute(table=" + table_name_ + ")"; + std::string hdr = "SearchRequest pre-execute(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); milvus::server::ContextChild tracer_pre(context_, "Pre Query"); // step 1: check table name - auto status = ValidationUtil::ValidateTableName(table_name_); + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } @@ -83,35 +83,36 @@ SearchRequest::OnExecute() { try { uint64_t vector_count = vectors_data_.vector_count_; fiu_do_on("SearchRequest.OnExecute.throw_std_exception", throw std::exception()); - std::string hdr = "SearchRequest execute(table=" + table_name_ + ", nq=" + std::to_string(vector_count) + - ", k=" + std::to_string(topk_) + ")"; + std::string hdr = "SearchRequest execute(collection=" + collection_name_ + + ", nq=" + std::to_string(vector_count) + ", k=" + std::to_string(topk_) + ")"; TimeRecorderAuto rc(hdr); // step 4: check table existence // only process root table, ignore partition table - table_schema_.table_id_ = table_name_; - auto status = DBWrapper::DB()->DescribeTable(table_schema_); + collection_schema_.collection_id_ = collection_name_; + auto status = DBWrapper::DB()->DescribeTable(collection_schema_); + fiu_do_on("SearchRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { - if (!table_schema_.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + if (!collection_schema_.owner_table_.empty()) { + return 
Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } // step 5: check search parameters - status = ValidationUtil::ValidateSearchParams(extra_params_, table_schema_, topk_); + status = ValidationUtil::ValidateSearchParams(extra_params_, collection_schema_, topk_); if (!status.ok()) { return status; } // step 6: check vector data according to metric type - status = ValidationUtil::ValidateVectorData(vectors_data_, table_schema_); + status = ValidationUtil::ValidateVectorData(vectors_data_, collection_schema_); if (!status.ok()) { return status; } @@ -128,7 +129,7 @@ SearchRequest::OnExecute() { engine::ResultDistances result_distances; if (file_id_list_.empty()) { - status = DBWrapper::DB()->Query(context_, table_name_, partition_list_, (size_t)topk_, extra_params_, + status = DBWrapper::DB()->Query(context_, collection_name_, partition_list_, (size_t)topk_, extra_params_, vectors_data_, result_ids, result_distances); } else { status = DBWrapper::DB()->QueryByFileID(context_, file_id_list_, (size_t)topk_, extra_params_, @@ -146,7 +147,7 @@ SearchRequest::OnExecute() { } fiu_do_on("SearchRequest.OnExecute.empty_result_ids", result_ids.clear()); if (result_ids.empty()) { - return Status::OK(); // empty table + return Status::OK(); // empty collection } // step 8: construct result array diff --git a/core/src/server/delivery/request/SearchRequest.h b/core/src/server/delivery/request/SearchRequest.h index 4092cd519d..bdf08b745f 100644 --- a/core/src/server/delivery/request/SearchRequest.h +++ b/core/src/server/delivery/request/SearchRequest.h @@ -23,14 +23,14 @@ namespace server { class SearchRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, const std::vector& file_id_list, TopKQueryResult& result); const std::string& - TableName() const { - return table_name_; + CollectionName() const { + return collection_name_; } const engine::VectorsData& @@ -63,13 +63,13 @@ class SearchRequest : public BaseRequest { return result_; } - const milvus::engine::meta::TableSchema& + const milvus::engine::meta::CollectionSchema& TableSchema() const { - return table_schema_; + return collection_schema_; } protected: - SearchRequest(const std::shared_ptr& context, const std::string& table_name, + SearchRequest(const std::shared_ptr& context, const std::string& collection_name, const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params, const std::vector& partition_list, const std::vector& file_id_list, TopKQueryResult& result); @@ -81,7 +81,7 @@ class SearchRequest : public BaseRequest { OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; const engine::VectorsData& vectors_data_; int64_t topk_; milvus::json extra_params_; @@ -91,7 +91,7 @@ class SearchRequest : public BaseRequest { TopKQueryResult& result_; // for validation - milvus::engine::meta::TableSchema table_schema_; + milvus::engine::meta::CollectionSchema collection_schema_; }; using SearchRequestPtr = std::shared_ptr; diff --git a/core/src/server/delivery/request/ShowPartitionsRequest.cpp b/core/src/server/delivery/request/ShowPartitionsRequest.cpp index a7e97ac7cb..dba0b0633e 100644 --- a/core/src/server/delivery/request/ShowPartitionsRequest.cpp +++ 
b/core/src/server/delivery/request/ShowPartitionsRequest.cpp @@ -23,49 +23,52 @@ namespace milvus { namespace server { ShowPartitionsRequest::ShowPartitionsRequest(const std::shared_ptr& context, - const std::string& table_name, std::vector& partition_list) - : BaseRequest(context, BaseRequest::kShowPartitions), table_name_(table_name), partition_list_(partition_list) { + const std::string& collection_name, + std::vector& partition_list) + : BaseRequest(context, BaseRequest::kShowPartitions), + collection_name_(collection_name), + partition_list_(partition_list) { } BaseRequestPtr -ShowPartitionsRequest::Create(const std::shared_ptr& context, const std::string& table_name, - std::vector& partition_list) { - return std::shared_ptr(new ShowPartitionsRequest(context, table_name, partition_list)); +ShowPartitionsRequest::Create(const std::shared_ptr& context, + const std::string& collection_name, std::vector& partition_list) { + return std::shared_ptr(new ShowPartitionsRequest(context, collection_name, partition_list)); } Status ShowPartitionsRequest::OnExecute() { - std::string hdr = "ShowPartitionsRequest(table=" + table_name_ + ")"; + std::string hdr = "ShowPartitionsRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); - // step 1: check table name - auto status = ValidationUtil::ValidateTableName(table_name_); + // step 1: check collection name + auto status = ValidationUtil::ValidateCollectionName(collection_name_); fiu_do_on("ShowPartitionsRequest.OnExecute.invalid_table_name", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { return status; } - // step 2: check table existence - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 2: check collection existence + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } // step 3: get partitions - std::vector schema_array; - status = DBWrapper::DB()->ShowPartitions(table_name_, schema_array); + std::vector schema_array; + status = DBWrapper::DB()->ShowPartitions(collection_name_, schema_array); fiu_do_on("ShowPartitionsRequest.OnExecute.show_partition_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { @@ -73,7 +76,7 @@ ShowPartitionsRequest::OnExecute() { } partition_list_.clear(); - partition_list_.emplace_back(table_name_, milvus::engine::DEFAULT_PARTITON_TAG); + partition_list_.emplace_back(collection_name_, milvus::engine::DEFAULT_PARTITON_TAG); for (auto& schema : schema_array) { partition_list_.emplace_back(schema.owner_table_, schema.partition_tag_); } diff --git a/core/src/server/delivery/request/ShowPartitionsRequest.h b/core/src/server/delivery/request/ShowPartitionsRequest.h index 58221b212d..1ef1b99ef2 100644 --- a/core/src/server/delivery/request/ShowPartitionsRequest.h +++ b/core/src/server/delivery/request/ShowPartitionsRequest.h @@ -23,18 +23,18 @@ namespace server { class 
ShowPartitionsRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, std::vector& partition_list); protected: - ShowPartitionsRequest(const std::shared_ptr& context, const std::string& table_name, + ShowPartitionsRequest(const std::shared_ptr& context, const std::string& collection_name, std::vector& partition_list); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; std::vector& partition_list_; }; diff --git a/core/src/server/delivery/request/ShowTableInfoRequest.cpp b/core/src/server/delivery/request/ShowTableInfoRequest.cpp index 361b66d86c..de16bf9cba 100644 --- a/core/src/server/delivery/request/ShowTableInfoRequest.cpp +++ b/core/src/server/delivery/request/ShowTableInfoRequest.cpp @@ -44,47 +44,47 @@ ConstructPartitionStat(const engine::PartitionStat& partition_stat, PartitionSta } ShowTableInfoRequest::ShowTableInfoRequest(const std::shared_ptr& context, - const std::string& table_name, TableInfo& table_info) - : BaseRequest(context, BaseRequest::kShowTableInfo), table_name_(table_name), table_info_(table_info) { + const std::string& collection_name, TableInfo& table_info) + : BaseRequest(context, BaseRequest::kShowTableInfo), collection_name_(collection_name), table_info_(table_info) { } BaseRequestPtr -ShowTableInfoRequest::Create(const std::shared_ptr& context, const std::string& table_name, - TableInfo& table_info) { - return std::shared_ptr(new ShowTableInfoRequest(context, table_name, table_info)); +ShowTableInfoRequest::Create(const std::shared_ptr& context, + const std::string& collection_name, TableInfo& table_info) { + return std::shared_ptr(new ShowTableInfoRequest(context, collection_name, table_info)); } Status ShowTableInfoRequest::OnExecute() { - std::string hdr = "ShowTableInfoRequest(table=" + table_name_ + ")"; + std::string hdr = "ShowTableInfoRequest(collection=" + collection_name_ + ")"; TimeRecorderAuto rc(hdr); - // step 1: check table name - auto status = ValidationUtil::ValidateTableName(table_name_); + // step 1: check collection name + auto status = ValidationUtil::ValidateCollectionName(collection_name_); if (!status.ok()) { return status; } - // step 2: check table existence - // only process root table, ignore partition table - engine::meta::TableSchema table_schema; - table_schema.table_id_ = table_name_; + // step 2: check collection existence + // only process root collection, ignore partition collection + engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = collection_name_; status = DBWrapper::DB()->DescribeTable(table_schema); if (!status.ok()) { if (status.code() == DB_NOT_FOUND) { - return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_)); + return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_)); } else { return status; } } else { if (!table_schema.owner_table_.empty()) { - return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_)); + return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_)); } } // step 3: get partitions engine::TableInfo table_info; - status = DBWrapper::DB()->GetTableInfo(table_name_, table_info); + status = DBWrapper::DB()->GetTableInfo(collection_name_, table_info); if (!status.ok()) { return status; } diff --git a/core/src/server/delivery/request/ShowTableInfoRequest.h b/core/src/server/delivery/request/ShowTableInfoRequest.h 
index b709de7ff8..d97bbb9765 100644 --- a/core/src/server/delivery/request/ShowTableInfoRequest.h +++ b/core/src/server/delivery/request/ShowTableInfoRequest.h @@ -29,18 +29,18 @@ namespace server { class ShowTableInfoRequest : public BaseRequest { public: static BaseRequestPtr - Create(const std::shared_ptr& context, const std::string& table_name, + Create(const std::shared_ptr& context, const std::string& collection_name, TableInfo& table_info); protected: - ShowTableInfoRequest(const std::shared_ptr& context, const std::string& table_name, + ShowTableInfoRequest(const std::shared_ptr& context, const std::string& collection_name, TableInfo& table_info); Status OnExecute() override; private: - const std::string table_name_; + const std::string collection_name_; TableInfo& table_info_; }; diff --git a/core/src/server/delivery/request/ShowTablesRequest.cpp b/core/src/server/delivery/request/ShowTablesRequest.cpp index af28b8e11e..4fc032b9e8 100644 --- a/core/src/server/delivery/request/ShowTablesRequest.cpp +++ b/core/src/server/delivery/request/ShowTablesRequest.cpp @@ -37,7 +37,7 @@ Status ShowTablesRequest::OnExecute() { TimeRecorderAuto rc("ShowTablesRequest"); - std::vector schema_array; + std::vector schema_array; auto status = DBWrapper::DB()->AllTables(schema_array); fiu_do_on("ShowTablesRequest.OnExecute.show_tables_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, "")); if (!status.ok()) { @@ -45,7 +45,7 @@ ShowTablesRequest::OnExecute() { } for (auto& schema : schema_array) { - table_name_list_.push_back(schema.table_id_); + table_name_list_.push_back(schema.collection_id_); } return Status::OK(); } diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.cpp b/core/src/server/grpc_impl/GrpcRequestHandler.cpp index eaf0d47a3d..e32b131055 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.cpp +++ b/core/src/server/grpc_impl/GrpcRequestHandler.cpp @@ -481,9 +481,9 @@ GrpcRequestHandler::DescribeTable(::grpc::ServerContext* context, const ::milvus ::milvus::grpc::TableSchema* response) { CHECK_NULLPTR_RETURN(request); - TableSchema table_schema; + CollectionSchema table_schema; Status status = request_handler_.DescribeTable(context_map_[context], request->table_name(), table_schema); - response->set_table_name(table_schema.table_name_); + response->set_table_name(table_schema.collection_name_); response->set_dimension(table_schema.dimension_); response->set_index_file_size(table_schema.index_file_size_); response->set_metric_type(table_schema.metric_type_); @@ -511,8 +511,8 @@ GrpcRequestHandler::ShowTables(::grpc::ServerContext* context, const ::milvus::g std::vector tables; Status status = request_handler_.ShowTables(context_map_[context], tables); - for (auto& table : tables) { - response->add_table_names(table); + for (auto& collection : tables) { + response->add_table_names(collection); } SET_RESPONSE(response->mutable_status(), status, context); @@ -581,7 +581,7 @@ GrpcRequestHandler::DescribeIndex(::grpc::ServerContext* context, const ::milvus IndexParam param; Status status = request_handler_.DescribeIndex(context_map_[context], request->table_name(), param); - response->set_table_name(param.table_name_); + response->set_table_name(param.collection_name_); response->set_index_type(param.index_type_); ::milvus::grpc::KeyValuePair* kv = response->add_extra_params(); kv->set_key(EXTRA_PARAM_KEY); @@ -645,11 +645,11 @@ GrpcRequestHandler::Flush(::grpc::ServerContext* context, const ::milvus::grpc:: ::milvus::grpc::Status* response) { CHECK_NULLPTR_RETURN(request); 
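
// Reviewer sketch (not part of the patch): the request classes above
// (SearchRequest, ShowPartitionsRequest, ShowTableInfoRequest) all repeat the
// same step-2 existence check that this diff renames. Below is a condensed
// standalone model of it, with simplified stand-ins for the Milvus types
// (requires C++17 for std::optional).
#include <iostream>
#include <map>
#include <optional>
#include <string>

struct CollectionSchema {
    std::string collection_id_;  // renamed from table_id_ in this patch
    std::string owner_table_;    // non-empty means the entry is a partition
};

// Stand-in for DBWrapper::DB()->DescribeTable(); std::nullopt plays DB_NOT_FOUND.
std::optional<CollectionSchema> Describe(const std::string& name) {
    static const std::map<std::string, CollectionSchema> meta = {
        {"docs", {"docs", ""}},
        {"docs_p0", {"docs_p0", "docs"}},  // partition owned by "docs"
    };
    auto it = meta.find(name);
    if (it == meta.end()) {
        return std::nullopt;
    }
    return it->second;
}

enum class Check { kOk, kNotExist, kInvalidName };

// Only root collections are addressable by clients; partitions carry their
// parent in owner_table_ and are rejected with the same "not exist" message.
Check CheckRootCollection(const std::string& name) {
    auto schema = Describe(name);
    if (!schema) {
        return Check::kNotExist;  // maps to SERVER_TABLE_NOT_EXIST
    }
    if (!schema->owner_table_.empty()) {
        return Check::kInvalidName;  // maps to SERVER_INVALID_TABLE_NAME
    }
    return Check::kOk;
}

int main() {
    std::cout << (CheckRootCollection("docs") == Check::kOk) << "\n";     // 1
    std::cout << (CheckRootCollection("docs_p0") == Check::kOk) << "\n";  // 0: partition
    std::cout << (CheckRootCollection("missing") == Check::kOk) << "\n";  // 0: not found
}
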
- std::vector table_names; + std::vector collection_names; for (int32_t i = 0; i < request->table_name_array().size(); i++) { - table_names.push_back(request->table_name_array(i)); + collection_names.push_back(request->table_name_array(i)); } - Status status = request_handler_.Flush(context_map_[context], table_names); + Status status = request_handler_.Flush(context_map_[context], collection_names); SET_RESPONSE(response, status, context); return ::grpc::Status::OK; diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.h b/core/src/server/grpc_impl/GrpcRequestHandler.h index a75fde16d9..1180fe76fe 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.h +++ b/core/src/server/grpc_impl/GrpcRequestHandler.h @@ -80,36 +80,36 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, random_id() const; // * - // @brief This method is used to create table + // @brief This method is used to create collection // - // @param TableSchema, use to provide table information to be created. + // @param TableSchema, use to provide collection information to be created. // // @return Status ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override; // * - // @brief This method is used to test table existence. + // @brief This method is used to test collection existence. // - // @param TableName, table name is going to be tested. + // @param CollectionName, collection name is going to be tested. // // @return BoolReply ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override; // * - // @brief This method is used to get table schema. + // @brief This method is used to get collection schema. // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return TableSchema ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override; // * - // @brief This method is used to get table schema. + // @brief This method is used to get collection schema. // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return TableRowCount ::grpc::Status @@ -120,14 +120,14 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, // // @param Command, dummy parameter. // - // @return TableNameList + // @return CollectionNameList ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override; // * - // @brief This method is used to get table detail information. + // @brief This method is used to get collection detail information. // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return TableInfo ::grpc::Status @@ -135,16 +135,16 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, ::milvus::grpc::TableInfo* response); // * - // @brief This method is used to delete table. + // @brief This method is used to delete collection. // - // @param TableName, table name is going to be deleted. + // @param CollectionName, collection name is going to be deleted. 
// - // @return TableNameList + // @return CollectionNameList ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override; // * - // @brief This method is used to build index by table in sync mode. + // @brief This method is used to build index by collection in sync mode. // // @param IndexParam, index paramters. // @@ -155,7 +155,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, // * // @brief This method is used to describe index // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return IndexParam ::grpc::Status @@ -164,7 +164,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, // * // @brief This method is used to drop index // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return Status ::grpc::Status @@ -182,7 +182,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, // * // @brief This method is used to show partition information // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return PartitionList ::grpc::Status @@ -198,7 +198,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) override; // * - // @brief This method is used to add vector array to table. + // @brief This method is used to add vector array to collection. // // @param InsertParam, insert parameters. // @@ -218,14 +218,14 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, // * // @brief This method is used to get vector ids from a segment // - // @param GetVectorIDsParam, target table and segment + // @param GetVectorIDsParam, target collection and segment // // @return VectorIds ::grpc::Status GetVectorIDs(::grpc::ServerContext* context, const ::milvus::grpc::GetVectorIDsParam* request, ::milvus::grpc::VectorIds* response); // * - // @brief This method is used to query vector in table. + // @brief This method is used to query vector in collection. // // @param SearchParam, search parameters. // @@ -275,9 +275,9 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, ::milvus::grpc::Status* response); // * - // @brief This method is used to preload table + // @brief This method is used to preload collection // - // @param TableName, target table name. + // @param CollectionName, target collection name. // // @return Status ::grpc::Status @@ -294,9 +294,9 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service, Flush(::grpc::ServerContext* context, const ::milvus::grpc::FlushParam* request, ::milvus::grpc::Status* response); // * - // @brief This method is used to compact table + // @brief This method is used to compact collection // - // @param TableName, target table name. + // @param CollectionName, target collection name. 
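
// Reviewer sketch (not part of the patch): this diff renames only C++
// identifiers and doc comments; the gRPC wire contract (TableName,
// TableSchema, the table_name field) is unchanged, so existing clients keep
// working. A hedged client-side call using the generated stub might look like
// the code below; the header path and channel setup are assumptions, so check
// the generated milvus.grpc.pb.h before relying on them.
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
#include "milvus.grpc.pb.h"  // generated from milvus.proto; path assumed

bool DescribeCollection(const std::shared_ptr<::grpc::Channel>& channel, const std::string& name) {
    auto stub = ::milvus::grpc::MilvusService::NewStub(channel);
    ::milvus::grpc::TableName request;
    request.set_table_name(name);  // proto field keeps its pre-rename name
    ::milvus::grpc::TableSchema response;
    ::grpc::ClientContext context;
    ::grpc::Status status = stub->DescribeTable(&context, request, &response);
    // On success the server fills table_name, dimension, index_file_size and
    // metric_type, as the handler code in this diff shows.
    return status.ok();
}
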
// // @return Status ::grpc::Status diff --git a/core/src/server/web_impl/handler/WebRequestHandler.cpp b/core/src/server/web_impl/handler/WebRequestHandler.cpp index 7df1e080a8..eb3c9e110c 100644 --- a/core/src/server/web_impl/handler/WebRequestHandler.cpp +++ b/core/src/server/web_impl/handler/WebRequestHandler.cpp @@ -163,7 +163,7 @@ WebRequestHandler::ParsePartitionStat(const milvus::server::PartitionStat& par_s Status WebRequestHandler::IsBinaryTable(const std::string& collection_name, bool& bin) { - TableSchema schema; + CollectionSchema schema; auto status = request_handler_.DescribeTable(context_ptr_, collection_name, schema); if (status.ok()) { auto metric = engine::MetricType(schema.metric_type_); @@ -209,7 +209,7 @@ WebRequestHandler::CopyRecordsFromJson(const nlohmann::json& json, engine::Vecto ///////////////////////// WebRequestHandler methods /////////////////////////////////////// Status WebRequestHandler::GetTableMetaInfo(const std::string& collection_name, nlohmann::json& json_out) { - TableSchema schema; + CollectionSchema schema; auto status = request_handler_.DescribeTable(context_ptr_, collection_name, schema); if (!status.ok()) { return status; @@ -227,7 +227,7 @@ WebRequestHandler::GetTableMetaInfo(const std::string& collection_name, nlohmann return status; } - json_out["collection_name"] = schema.table_name_; + json_out["collection_name"] = schema.collection_name_; json_out["dimension"] = schema.dimension_; json_out["index_file_size"] = schema.index_file_size_; json_out["index"] = IndexMap.at(engine::EngineType(index_param.index_type_)); @@ -898,7 +898,7 @@ WebRequestHandler::SetGpuConfig(const GPUConfigDto::ObjectWrapper& gpu_config_dt /************* * - * Table { + * Collection { */ StatusDto::ObjectWrapper WebRequestHandler::CreateTable(const TableRequestDto::ObjectWrapper& collection_schema) { @@ -1029,7 +1029,7 @@ WebRequestHandler::DropTable(const OString& collection_name) { */ StatusDto::ObjectWrapper -WebRequestHandler::CreateIndex(const OString& table_name, const OString& body) { +WebRequestHandler::CreateIndex(const OString& collection_name, const OString& body) { try { auto request_json = nlohmann::json::parse(body->std_str()); if (!request_json.contains("index_type")) { @@ -1044,7 +1044,8 @@ WebRequestHandler::CreateIndex(const OString& table_name, const OString& body) { if (!request_json.contains("params")) { RETURN_STATUS_DTO(BODY_FIELD_LOSS, "Field \'params\' is required") } - auto status = request_handler_.CreateIndex(context_ptr_, table_name->std_str(), index, request_json["params"]); + auto status = + request_handler_.CreateIndex(context_ptr_, collection_name->std_str(), index, request_json["params"]); ASSIGN_RETURN_STATUS_DTO(status); } catch (nlohmann::detail::parse_error& e) { RETURN_STATUS_DTO(BODY_PARSE_FAIL, e.what()) diff --git a/core/src/server/web_impl/handler/WebRequestHandler.h b/core/src/server/web_impl/handler/WebRequestHandler.h index 24490b43af..6f18dac27c 100644 --- a/core/src/server/web_impl/handler/WebRequestHandler.h +++ b/core/src/server/web_impl/handler/WebRequestHandler.h @@ -98,25 +98,25 @@ class WebRequestHandler { ParsePartitionStat(const PartitionStat& par_stat, nlohmann::json& json); Status - IsBinaryTable(const std::string& table_name, bool& bin); + IsBinaryTable(const std::string& collection_name, bool& bin); Status CopyRecordsFromJson(const nlohmann::json& json, engine::VectorsData& vectors, bool bin); protected: Status - GetTableMetaInfo(const std::string& table_name, nlohmann::json& json_out); + 
GetTableMetaInfo(const std::string& collection_name, nlohmann::json& json_out); Status - GetTableStat(const std::string& table_name, nlohmann::json& json_out); + GetTableStat(const std::string& collection_name, nlohmann::json& json_out); Status - GetSegmentVectors(const std::string& table_name, const std::string& segment_name, int64_t page_size, int64_t offset, - nlohmann::json& json_out); + GetSegmentVectors(const std::string& collection_name, const std::string& segment_name, int64_t page_size, + int64_t offset, nlohmann::json& json_out); Status - GetSegmentIds(const std::string& table_name, const std::string& segment_name, int64_t page_size, int64_t offset, - nlohmann::json& json_out); + GetSegmentIds(const std::string& collection_name, const std::string& segment_name, int64_t page_size, + int64_t offset, nlohmann::json& json_out); Status CommandLine(const std::string& cmd, std::string& reply); @@ -140,13 +140,13 @@ class WebRequestHandler { SetConfig(const nlohmann::json& json, std::string& result_str); Status - Search(const std::string& table_name, const nlohmann::json& json, std::string& result_str); + Search(const std::string& collection_name, const nlohmann::json& json, std::string& result_str); Status - DeleteByIDs(const std::string& table_name, const nlohmann::json& json, std::string& result_str); + DeleteByIDs(const std::string& collection_name, const nlohmann::json& json, std::string& result_str); Status - GetVectorsByIDs(const std::string& table_name, const std::vector& ids, nlohmann::json& json_out); + GetVectorsByIDs(const std::string& collection_name, const std::vector& ids, nlohmann::json& json_out); public: WebRequestHandler() { @@ -178,39 +178,39 @@ class WebRequestHandler { ShowTables(const OQueryParams& query_params, OString& result); StatusDto::ObjectWrapper - GetTable(const OString& table_name, const OQueryParams& query_params, OString& result); + GetTable(const OString& collection_name, const OQueryParams& query_params, OString& result); StatusDto::ObjectWrapper - DropTable(const OString& table_name); + DropTable(const OString& collection_name); StatusDto::ObjectWrapper - CreateIndex(const OString& table_name, const OString& body); + CreateIndex(const OString& collection_name, const OString& body); StatusDto::ObjectWrapper - GetIndex(const OString& table_name, OString& result); + GetIndex(const OString& collection_name, OString& result); StatusDto::ObjectWrapper - DropIndex(const OString& table_name); + DropIndex(const OString& collection_name); StatusDto::ObjectWrapper - CreatePartition(const OString& table_name, const PartitionRequestDto::ObjectWrapper& param); + CreatePartition(const OString& collection_name, const PartitionRequestDto::ObjectWrapper& param); StatusDto::ObjectWrapper - ShowPartitions(const OString& table_name, const OQueryParams& query_params, + ShowPartitions(const OString& collection_name, const OQueryParams& query_params, PartitionListDto::ObjectWrapper& partition_list_dto); StatusDto::ObjectWrapper - DropPartition(const OString& table_name, const OString& body); + DropPartition(const OString& collection_name, const OString& body); /*********** * * Segment */ StatusDto::ObjectWrapper - ShowSegments(const OString& table_name, const OQueryParams& query_params, OString& response); + ShowSegments(const OString& collection_name, const OQueryParams& query_params, OString& response); StatusDto::ObjectWrapper - GetSegmentInfo(const OString& table_name, const OString& segment_name, const OString& info, + GetSegmentInfo(const OString& collection_name, 
const OString& segment_name, const OString& info, const OQueryParams& query_params, OString& result); /** @@ -218,13 +218,13 @@ class WebRequestHandler { * Vector */ StatusDto::ObjectWrapper - Insert(const OString& table_name, const OString& body, VectorIdsDto::ObjectWrapper& ids_dto); + Insert(const OString& collection_name, const OString& body, VectorIdsDto::ObjectWrapper& ids_dto); StatusDto::ObjectWrapper - GetVector(const OString& table_name, const OQueryParams& query_params, OString& response); + GetVector(const OString& collection_name, const OQueryParams& query_params, OString& response); StatusDto::ObjectWrapper - VectorsOp(const OString& table_name, const OString& payload, OString& response); + VectorsOp(const OString& collection_name, const OString& payload, OString& response); /** * diff --git a/core/src/utils/StringHelpFunctions.h b/core/src/utils/StringHelpFunctions.h index 0f1226a8c2..48dd9ceb77 100644 --- a/core/src/utils/StringHelpFunctions.h +++ b/core/src/utils/StringHelpFunctions.h @@ -49,7 +49,7 @@ class StringHelpFunctions { static void MergeStringWithDelimeter(const std::vector& strs, const std::string& delimeter, std::string& result); - // assume the table has two columns, quote='\"', delimeter=',' + // assume the collection has two columns, quote='\"', delimeter=',' // a,b a | b // "aa,gg,yy",b aa,gg,yy | b // aa"dd,rr"kk,pp aadd,rrkk | pp diff --git a/core/src/utils/ValidationUtil.cpp b/core/src/utils/ValidationUtil.cpp index 0808ca582d..f86f688fc2 100644 --- a/core/src/utils/ValidationUtil.cpp +++ b/core/src/utils/ValidationUtil.cpp @@ -98,35 +98,35 @@ CheckParameterExistence(const milvus::json& json_params, const std::string& para } // namespace Status -ValidationUtil::ValidateTableName(const std::string& table_name) { - // Table name shouldn't be empty. - if (table_name.empty()) { - std::string msg = "Table name should not be empty."; +ValidationUtil::ValidateCollectionName(const std::string& collection_name) { + // Collection name shouldn't be empty. + if (collection_name.empty()) { + std::string msg = "Collection name should not be empty."; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_TABLE_NAME, msg); } - std::string invalid_msg = "Invalid table name: " + table_name + ". "; - // Table name size shouldn't exceed 16384. - if (table_name.size() > TABLE_NAME_SIZE_LIMIT) { - std::string msg = invalid_msg + "The length of a table name must be less than 255 characters."; + std::string invalid_msg = "Invalid collection name: " + collection_name + ". "; + // Collection name size shouldn't exceed 16384. + if (collection_name.size() > TABLE_NAME_SIZE_LIMIT) { + std::string msg = invalid_msg + "The length of a collection name must be less than 255 characters."; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_TABLE_NAME, msg); } - // Table name first character should be underscore or character. - char first_char = table_name[0]; + // Collection name first character should be underscore or character. 
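
// Reviewer sketch (not part of the patch): the rules ValidateCollectionName
// enforces, restated as a standalone predicate with example names. Note a
// pre-existing mismatch carried over by the rename: the code comment says the
// size limit is 16384 while the error message says 255; the actual
// TABLE_NAME_SIZE_LIMIT value is not visible in this diff, so 255 below is an
// assumption taken from the message.
#include <cctype>
#include <iostream>
#include <string>

bool IsValidCollectionName(const std::string& name) {
    if (name.empty() || name.size() > 255) {
        return false;  // empty and over-long names are rejected
    }
    unsigned char first = static_cast<unsigned char>(name[0]);
    if (name[0] != '_' && std::isalpha(first) == 0) {
        return false;  // must start with an underscore or a letter
    }
    for (size_t i = 1; i < name.size(); ++i) {
        unsigned char c = static_cast<unsigned char>(name[i]);
        if (name[i] != '_' && std::isalnum(c) == 0) {
            return false;  // afterwards: letters, digits, underscores only
        }
    }
    return true;
}

int main() {
    std::cout << IsValidCollectionName("_my_collection1") << "\n";  // 1
    std::cout << IsValidCollectionName("1bad") << "\n";             // 0: starts with a digit
    std::cout << IsValidCollectionName("has-dash") << "\n";         // 0: '-' not allowed
}
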
+ char first_char = collection_name[0]; if (first_char != '_' && std::isalpha(first_char) == 0) { - std::string msg = invalid_msg + "The first character of a table name must be an underscore or letter."; + std::string msg = invalid_msg + "The first character of a collection name must be an underscore or letter."; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_TABLE_NAME, msg); } - int64_t table_name_size = table_name.size(); + int64_t table_name_size = collection_name.size(); for (int64_t i = 1; i < table_name_size; ++i) { - char name_char = table_name[i]; + char name_char = collection_name[i]; if (name_char != '_' && std::isalnum(name_char) == 0) { - std::string msg = invalid_msg + "Table name can only contain numbers, letters, and underscores."; + std::string msg = invalid_msg + "Collection name can only contain numbers, letters, and underscores."; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_TABLE_NAME, msg); } @@ -138,8 +138,8 @@ ValidationUtil::ValidateTableName(const std::string& table_name) { Status ValidationUtil::ValidateTableDimension(int64_t dimension, int64_t metric_type) { if (dimension <= 0 || dimension > TABLE_DIMENSION_LIMIT) { - std::string msg = "Invalid table dimension: " + std::to_string(dimension) + ". " + - "The table dimension must be within the range of 1 ~ " + + std::string msg = "Invalid collection dimension: " + std::to_string(dimension) + ". " + + "The collection dimension must be within the range of 1 ~ " + std::to_string(TABLE_DIMENSION_LIMIT) + "."; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_VECTOR_DIMENSION, msg); @@ -147,8 +147,8 @@ ValidationUtil::ValidateTableDimension(int64_t dimension, int64_t metric_type) { if (milvus::engine::utils::IsBinaryMetricType(metric_type)) { if ((dimension % 8) != 0) { - std::string msg = "Invalid table dimension: " + std::to_string(dimension) + ". " + - "The table dimension must be a multiple of 8"; + std::string msg = "Invalid collection dimension: " + std::to_string(dimension) + ". 
" + + "The collection dimension must be a multiple of 8"; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_VECTOR_DIMENSION, msg); } @@ -180,8 +180,8 @@ ValidationUtil::ValidateTableIndexType(int32_t index_type) { } Status -ValidationUtil::ValidateIndexParams(const milvus::json& index_params, const engine::meta::TableSchema& table_schema, - int32_t index_type) { +ValidationUtil::ValidateIndexParams(const milvus::json& index_params, + const engine::meta::CollectionSchema& table_schema, int32_t index_type) { switch (index_type) { case (int32_t)engine::EngineType::FAISS_IDMAP: case (int32_t)engine::EngineType::FAISS_BIN_IDMAP: { @@ -213,7 +213,7 @@ ValidationUtil::ValidateIndexParams(const milvus::json& index_params, const engi milvus::knowhere::IVFPQConfAdapter::GetValidMList(table_schema.dimension_, resset); int64_t m_value = index_params[index_params, knowhere::IndexParams::m]; if (resset.empty()) { - std::string msg = "Invalid table dimension, unable to get reasonable values for 'm'"; + std::string msg = "Invalid collection dimension, unable to get reasonable values for 'm'"; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_TABLE_DIMENSION, msg); } @@ -270,8 +270,8 @@ ValidationUtil::ValidateIndexParams(const milvus::json& index_params, const engi } Status -ValidationUtil::ValidateSearchParams(const milvus::json& search_params, const engine::meta::TableSchema& table_schema, - int64_t topk) { +ValidationUtil::ValidateSearchParams(const milvus::json& search_params, + const engine::meta::CollectionSchema& table_schema, int64_t topk) { switch (table_schema.engine_type_) { case (int32_t)engine::EngineType::FAISS_IDMAP: case (int32_t)engine::EngineType::FAISS_BIN_IDMAP: { @@ -307,7 +307,8 @@ ValidationUtil::ValidateSearchParams(const milvus::json& search_params, const en } Status -ValidationUtil::ValidateVectorData(const engine::VectorsData& vectors, const engine::meta::TableSchema& table_schema) { +ValidationUtil::ValidateVectorData(const engine::VectorsData& vectors, + const engine::meta::CollectionSchema& table_schema) { if (vectors.float_data_.empty() && vectors.binary_data_.empty()) { return Status(SERVER_INVALID_ROWRECORD_ARRAY, "The vector array is empty. 
Make sure you have entered vector records."); @@ -317,22 +318,24 @@ ValidationUtil::ValidateVectorData(const engine::VectorsData& vectors, const eng if (engine::utils::IsBinaryMetricType(table_schema.metric_type_)) { // check prepared binary data if (vectors.binary_data_.size() % vector_count != 0) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, "The vector dimension must be equal to the table dimension."); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, + "The vector dimension must be equal to the collection dimension."); } if (vectors.binary_data_.size() * 8 / vector_count != table_schema.dimension_) { return Status(SERVER_INVALID_VECTOR_DIMENSION, - "The vector dimension must be equal to the table dimension."); + "The vector dimension must be equal to the collection dimension."); } } else { // check prepared float data fiu_do_on("SearchRequest.OnExecute.invalod_rowrecord_array", vector_count = vectors.float_data_.size() + 1); if (vectors.float_data_.size() % vector_count != 0) { - return Status(SERVER_INVALID_ROWRECORD_ARRAY, "The vector dimension must be equal to the table dimension."); + return Status(SERVER_INVALID_ROWRECORD_ARRAY, + "The vector dimension must be equal to the collection dimension."); } if (vectors.float_data_.size() / vector_count != table_schema.dimension_) { return Status(SERVER_INVALID_VECTOR_DIMENSION, - "The vector dimension must be equal to the table dimension."); + "The vector dimension must be equal to the collection dimension."); } } @@ -384,14 +387,14 @@ ValidationUtil::ValidatePartitionName(const std::string& partition_name) { } std::string invalid_msg = "Invalid partition name: " + partition_name + ". "; - // Table name size shouldn't exceed 16384. + // Collection name size shouldn't exceed 16384. if (partition_name.size() > TABLE_NAME_SIZE_LIMIT) { std::string msg = invalid_msg + "The length of a partition name must be less than 255 characters."; SERVER_LOG_ERROR << msg; return Status(SERVER_INVALID_TABLE_NAME, msg); } - // Table name first character should be underscore or character. + // Collection name first character should be underscore or character. 
char first_char = partition_name[0]; if (first_char != '_' && std::isalpha(first_char) == 0) { std::string msg = invalid_msg + "The first character of a partition name must be an underscore or letter."; diff --git a/core/src/utils/ValidationUtil.h b/core/src/utils/ValidationUtil.h index ec8960cad2..a939e09f71 100644 --- a/core/src/utils/ValidationUtil.h +++ b/core/src/utils/ValidationUtil.h @@ -30,7 +30,7 @@ class ValidationUtil { public: static Status - ValidateTableName(const std::string& table_name); + ValidateCollectionName(const std::string& collection_name); static Status ValidateTableDimension(int64_t dimension, int64_t metric_type); @@ -39,15 +39,15 @@ class ValidationUtil { ValidateTableIndexType(int32_t index_type); static Status - ValidateIndexParams(const milvus::json& index_params, const engine::meta::TableSchema& table_schema, + ValidateIndexParams(const milvus::json& index_params, const engine::meta::CollectionSchema& table_schema, int32_t index_type); static Status - ValidateSearchParams(const milvus::json& search_params, const engine::meta::TableSchema& table_schema, + ValidateSearchParams(const milvus::json& search_params, const engine::meta::CollectionSchema& table_schema, int64_t topk); static Status - ValidateVectorData(const engine::VectorsData& vectors, const engine::meta::TableSchema& table_schema); + ValidateVectorData(const engine::VectorsData& vectors, const engine::meta::CollectionSchema& table_schema); static Status ValidateTableIndexFileSize(int64_t index_file_size); diff --git a/core/unittest/db/test_db.cpp b/core/unittest/db/test_db.cpp index 04cba547bb..c014abae10 100644 --- a/core/unittest/db/test_db.cpp +++ b/core/unittest/db/test_db.cpp @@ -37,11 +37,11 @@ static constexpr int64_t INSERT_LOOP = 1000; static constexpr int64_t SECONDS_EACH_HOUR = 3600; static constexpr int64_t DAY_SECONDS = 24 * 60 * 60; -milvus::engine::meta::TableSchema +milvus::engine::meta::CollectionSchema BuildTableSchema() { - milvus::engine::meta::TableSchema table_info; + milvus::engine::meta::CollectionSchema table_info; table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = TABLE_NAME; + table_info.collection_id_ = TABLE_NAME; return table_info; } @@ -163,11 +163,11 @@ TEST_F(DBTest, CONFIG_TEST) { } TEST_F(DBTest, DB_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -267,11 +267,11 @@ TEST_F(DBTest, SEARCH_TEST) { milvus::server::Config& config = milvus::server::Config::GetInstance(); milvus::Status s = config.LoadConfigFile(config_path); - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -437,11 +437,11 @@ TEST_F(DBTest, SEARCH_TEST) { TEST_F(DBTest, PRELOADTABLE_TEST) { 
fiu_init(0); - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -496,7 +496,7 @@ TEST_F(DBTest, PRELOADTABLE_TEST) { TEST_F(DBTest, SHUTDOWN_TEST) { db_->Stop(); - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_FALSE(stat.ok()); @@ -515,51 +515,51 @@ TEST_F(DBTest, SHUTDOWN_TEST) { stat = db_->DropPartitionByTag(TABLE_NAME, "0"); ASSERT_FALSE(stat.ok()); - std::vector partition_schema_array; + std::vector partition_schema_array; stat = db_->ShowPartitions(TABLE_NAME, partition_schema_array); ASSERT_FALSE(stat.ok()); - std::vector table_infos; + std::vector table_infos; stat = db_->AllTables(table_infos); ASSERT_EQ(stat.code(), milvus::DB_ERROR); bool has_table = false; - stat = db_->HasTable(table_info.table_id_, has_table); + stat = db_->HasTable(table_info.collection_id_, has_table); ASSERT_FALSE(stat.ok()); milvus::engine::VectorsData xb; - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_FALSE(stat.ok()); stat = db_->Flush(); ASSERT_FALSE(stat.ok()); - stat = db_->DeleteVector(table_info.table_id_, 0); + stat = db_->DeleteVector(table_info.collection_id_, 0); ASSERT_FALSE(stat.ok()); milvus::engine::IDNumbers ids_to_delete{0}; - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); ASSERT_FALSE(stat.ok()); - stat = db_->Compact(table_info.table_id_); + stat = db_->Compact(table_info.collection_id_); ASSERT_FALSE(stat.ok()); milvus::engine::VectorsData vector; - stat = db_->GetVectorByID(table_info.table_id_, 0, vector); + stat = db_->GetVectorByID(table_info.collection_id_, 0, vector); ASSERT_FALSE(stat.ok()); - stat = db_->PreloadTable(table_info.table_id_); + stat = db_->PreloadTable(table_info.collection_id_); ASSERT_FALSE(stat.ok()); uint64_t row_count = 0; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_FALSE(stat.ok()); milvus::engine::TableIndex index; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_FALSE(stat.ok()); - stat = db_->DescribeIndex(table_info.table_id_, index); + stat = db_->DescribeIndex(table_info.collection_id_, index); ASSERT_FALSE(stat.ok()); stat = db_->DropIndex(TABLE_NAME); @@ -569,7 +569,8 @@ TEST_F(DBTest, SHUTDOWN_TEST) { milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; milvus::json json_params = {{"nprobe", 1}}; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, 1, json_params, xb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, 1, json_params, xb, result_ids, result_distances); ASSERT_FALSE(stat.ok()); std::vector file_ids; stat = db_->QueryByFileID(dummy_context_, @@ -582,7 +583,7 @@ TEST_F(DBTest, SHUTDOWN_TEST) { ASSERT_FALSE(stat.ok()); stat 
= db_->Query(dummy_context_, - table_info.table_id_, + table_info.collection_id_, tags, 1, json_params, @@ -591,13 +592,13 @@ TEST_F(DBTest, SHUTDOWN_TEST) { result_distances); ASSERT_FALSE(stat.ok()); - stat = db_->DropTable(table_info.table_id_); + stat = db_->DropTable(table_info.collection_id_); ASSERT_FALSE(stat.ok()); } TEST_F(DBTest, BACK_TIMER_THREAD_1) { fiu_init(0); - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); milvus::Status stat; // test background timer thread { @@ -632,7 +633,7 @@ TEST_F(DBTest, BACK_TIMER_THREAD_1) { TEST_F(DBTest, BACK_TIMER_THREAD_2) { fiu_init(0); milvus::Status stat; - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -656,7 +657,7 @@ TEST_F(DBTest, BACK_TIMER_THREAD_2) { TEST_F(DBTest, BACK_TIMER_THREAD_3) { fiu_init(0); milvus::Status stat; - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -681,7 +682,7 @@ TEST_F(DBTest, BACK_TIMER_THREAD_3) { TEST_F(DBTest, BACK_TIMER_THREAD_4) { fiu_init(0); milvus::Status stat; - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -704,7 +705,7 @@ TEST_F(DBTest, BACK_TIMER_THREAD_4) { } TEST_F(DBTest, INDEX_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); uint64_t nb = VECTOR_COUNT; @@ -717,55 +718,55 @@ TEST_F(DBTest, INDEX_TEST) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; index.metric_type_ = (int)milvus::engine::MetricType::IP; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); fiu_init(0); FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTableIndex.throw_exception"); - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_FALSE(stat.ok()); fiu_disable("SqliteMetaImpl.DescribeTableIndex.throw_exception"); index.engine_type_ = (int)milvus::engine::EngineType::FAISS_PQ; FIU_ENABLE_FIU("DBImpl.UpdateTableIndexRecursively.fail_update_table_index"); - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_FALSE(stat.ok()); fiu_disable("DBImpl.UpdateTableIndexRecursively.fail_update_table_index"); #ifdef MILVUS_GPU_VERSION index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); #endif milvus::engine::TableIndex index_out; - stat = db_->DescribeIndex(table_info.table_id_, index_out); + stat = db_->DescribeIndex(table_info.collection_id_, index_out); ASSERT_TRUE(stat.ok()); 
ASSERT_EQ(index.engine_type_, index_out.engine_type_); ASSERT_EQ(index.extra_params_, index_out.extra_params_); ASSERT_EQ(table_info.metric_type_, index_out.metric_type_); - stat = db_->DropIndex(table_info.table_id_); + stat = db_->DropIndex(table_info.collection_id_); ASSERT_TRUE(stat.ok()); } TEST_F(DBTest, PARTITION_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); // create partition and insert data const int64_t PARTITION_COUNT = 5; const int64_t INSERT_BATCH = 2000; - std::string table_name = TABLE_NAME; + std::string collection_name = TABLE_NAME; for (int64_t i = 0; i < PARTITION_COUNT; i++) { std::string partition_tag = std::to_string(i); - std::string partition_name = table_name + "_" + partition_tag; - stat = db_->CreatePartition(table_name, partition_name, partition_tag); + std::string partition_name = collection_name + "_" + partition_tag; + stat = db_->CreatePartition(collection_name, partition_name, partition_tag); ASSERT_TRUE(stat.ok()); // not allow nested partition @@ -773,7 +774,7 @@ TEST_F(DBTest, PARTITION_TEST) { ASSERT_FALSE(stat.ok()); // not allow duplicated partition - stat = db_->CreatePartition(table_name, partition_name, partition_tag); + stat = db_->CreatePartition(collection_name, partition_name, partition_tag); ASSERT_FALSE(stat.ok()); milvus::engine::VectorsData xb; @@ -785,7 +786,7 @@ TEST_F(DBTest, PARTITION_TEST) { vector_ids[k] = i * INSERT_BATCH + k; } - db_->InsertVectors(table_name, partition_tag, xb); + db_->InsertVectors(collection_name, partition_tag, xb); ASSERT_EQ(vector_ids.size(), INSERT_BATCH); // insert data into not existed partition @@ -794,20 +795,20 @@ TEST_F(DBTest, PARTITION_TEST) { } // duplicated partition is not allowed - stat = db_->CreatePartition(table_name, "", "0"); + stat = db_->CreatePartition(collection_name, "", "0"); ASSERT_FALSE(stat.ok()); - std::vector partition_schema_array; - stat = db_->ShowPartitions(table_name, partition_schema_array); + std::vector partition_schema_array; + stat = db_->ShowPartitions(collection_name, partition_schema_array); ASSERT_TRUE(stat.ok()); ASSERT_EQ(partition_schema_array.size(), PARTITION_COUNT); for (int64_t i = 0; i < PARTITION_COUNT; i++) { - ASSERT_EQ(partition_schema_array[i].table_id_, table_name + "_" + std::to_string(i)); + ASSERT_EQ(partition_schema_array[i].collection_id_, collection_name + "_" + std::to_string(i)); } - // check table existence + // check collection existence std::string special_part = "special"; - stat = db_->CreatePartition(table_name, special_part, special_part); + stat = db_->CreatePartition(collection_name, special_part, special_part); ASSERT_TRUE(stat.ok()); bool has_table = false; stat = db_->HasNativeTable(special_part, has_table); @@ -819,17 +820,17 @@ TEST_F(DBTest, PARTITION_TEST) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT; index.metric_type_ = (int)milvus::engine::MetricType::L2; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); fiu_init(0); FIU_ENABLE_FIU("DBImpl.WaitTableIndexRecursively.fail_build_table_Index_for_partition"); - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_FALSE(stat.ok()); 
fiu_disable("DBImpl.WaitTableIndexRecursively.fail_build_table_Index_for_partition"); FIU_ENABLE_FIU("DBImpl.WaitTableIndexRecursively.not_empty_err_msg"); - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_FALSE(stat.ok()); fiu_disable("DBImpl.WaitTableIndexRecursively.not_empty_err_msg"); @@ -866,7 +867,7 @@ TEST_F(DBTest, PARTITION_TEST) { ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids.size() / topk, nq); - // search in whole table + // search in whole collection tags.clear(); result_ids.clear(); result_distances.clear(); @@ -883,47 +884,47 @@ TEST_F(DBTest, PARTITION_TEST) { ASSERT_EQ(result_ids.size() / topk, nq); } - stat = db_->DropPartition(table_name + "_0"); + stat = db_->DropPartition(collection_name + "_0"); ASSERT_TRUE(stat.ok()); - stat = db_->DropPartitionByTag(table_name, "1"); + stat = db_->DropPartitionByTag(collection_name, "1"); ASSERT_TRUE(stat.ok()); FIU_ENABLE_FIU("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition"); - stat = db_->DropIndex(table_info.table_id_); + stat = db_->DropIndex(table_info.collection_id_); ASSERT_FALSE(stat.ok()); fiu_disable("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition"); FIU_ENABLE_FIU("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition"); - stat = db_->DropIndex(table_info.table_id_); + stat = db_->DropIndex(table_info.collection_id_); ASSERT_FALSE(stat.ok()); fiu_disable("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition"); - stat = db_->DropIndex(table_name); + stat = db_->DropIndex(collection_name); ASSERT_TRUE(stat.ok()); - stat = db_->DropTable(table_name); + stat = db_->DropTable(collection_name); ASSERT_TRUE(stat.ok()); } TEST_F(DBTest2, ARHIVE_DISK_CHECK) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - std::vector table_schema_array; + std::vector table_schema_array; stat = db_->AllTables(table_schema_array); ASSERT_TRUE(stat.ok()); bool bfound = false; for (auto& schema : table_schema_array) { - if (schema.table_id_ == TABLE_NAME) { + if (schema.collection_id_ == TABLE_NAME) { bfound = true; break; } } ASSERT_TRUE(bfound); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -949,11 +950,11 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) { } TEST_F(DBTest2, DELETE_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); @@ -973,11 +974,11 @@ TEST_F(DBTest2, DELETE_TEST) { milvus::engine::TableIndex index; stat = db_->CreateIndex(TABLE_NAME, index); - // create partition, drop table will drop partition recursively + // create partition, drop collection will drop partition recursively stat = db_->CreatePartition(TABLE_NAME, "part0", "0"); ASSERT_TRUE(stat.ok()); - // fail drop table + // fail drop 
collection fiu_init(0); FIU_ENABLE_FIU("DBImpl.DropTableRecursively.failed"); stat = db_->DropTable(TABLE_NAME); @@ -994,8 +995,8 @@ TEST_F(DBTest2, DELETE_TEST) { } TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) { - std::string table_name = TABLE_NAME; - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); + std::string collection_name = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); auto stat = db_->CreateTable(table_schema); uint64_t nb = VECTOR_COUNT; @@ -1003,21 +1004,21 @@ TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) { BuildVectors(nb, 0, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(table_name, "", xb); + stat = db_->InsertVectors(collection_name, "", xb); // create partition and insert data const int64_t PARTITION_COUNT = 2; const int64_t INSERT_BATCH = 2000; for (int64_t i = 0; i < PARTITION_COUNT; i++) { std::string partition_tag = std::to_string(i); - std::string partition_name = table_name + "_" + partition_tag; - stat = db_->CreatePartition(table_name, partition_name, partition_tag); + std::string partition_name = collection_name + "_" + partition_tag; + stat = db_->CreatePartition(collection_name, partition_name, partition_tag); ASSERT_TRUE(stat.ok()); milvus::engine::VectorsData xb; BuildVectors(INSERT_BATCH, i, xb); - db_->InsertVectors(table_name, partition_tag, xb); + db_->InsertVectors(collection_name, partition_tag, xb); } stat = db_->Flush(); @@ -1025,7 +1026,7 @@ TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) { { milvus::engine::TableInfo table_info; - stat = db_->GetTableInfo(table_name, table_info); + stat = db_->GetTableInfo(collection_name, table_info); ASSERT_TRUE(stat.ok()); int64_t row_count = 0; for (auto& part : table_info.partitions_stat_) { @@ -1045,7 +1046,7 @@ TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) { } TEST_F(DBTestWAL, DB_INSERT_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -1055,26 +1056,26 @@ TEST_F(DBTestWAL, DB_INSERT_TEST) { std::string partition_name = "part_name"; std::string partition_tag = "part_tag"; - stat = db_->CreatePartition(table_info.table_id_, partition_name, partition_tag); + stat = db_->CreatePartition(table_info.collection_id_, partition_name, partition_tag); ASSERT_TRUE(stat.ok()); - stat = db_->InsertVectors(table_info.table_id_, partition_tag, qxb); + stat = db_->InsertVectors(table_info.collection_id_, partition_tag, qxb); ASSERT_TRUE(stat.ok()); - stat = db_->InsertVectors(table_info.table_id_, "", qxb); + stat = db_->InsertVectors(table_info.collection_id_, "", qxb); ASSERT_TRUE(stat.ok()); - stat = db_->InsertVectors(table_info.table_id_, "not exist", qxb); + stat = db_->InsertVectors(table_info.collection_id_, "not exist", qxb); ASSERT_FALSE(stat.ok()); - db_->Flush(table_info.table_id_); + db_->Flush(table_info.collection_id_); - stat = db_->DropTable(table_info.table_id_); + stat = db_->DropTable(table_info.collection_id_); ASSERT_TRUE(stat.ok()); } TEST_F(DBTestWAL, DB_STOP_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -1082,7 +1083,7 @@ TEST_F(DBTestWAL, DB_STOP_TEST) { for (int i = 0; i < 5; i++) { milvus::engine::VectorsData qxb; BuildVectors(qb, i, qxb); - stat = db_->InsertVectors(table_info.table_id_, "", qxb); + stat = 
db_->InsertVectors(table_info.collection_id_, "", qxb); ASSERT_TRUE(stat.ok()); } @@ -1096,16 +1097,17 @@ TEST_F(DBTestWAL, DB_STOP_TEST) { milvus::engine::ResultDistances result_distances; milvus::engine::VectorsData qxb; BuildVectors(qb, 0, qxb); - stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, json_params, qxb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids.size() / topk, qb); - stat = db_->DropTable(table_info.table_id_); + stat = db_->DropTable(table_info.collection_id_); ASSERT_TRUE(stat.ok()); } TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -1114,7 +1116,7 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) { for (int i = 0; i < 5; i++) { milvus::engine::VectorsData qxb; BuildVectors(qb, i, qxb); - stat = db_->InsertVectors(table_info.table_id_, "", qxb); + stat = db_->InsertVectors(table_info.collection_id_, "", qxb); ASSERT_TRUE(stat.ok()); } @@ -1125,7 +1127,8 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) { milvus::engine::ResultDistances result_distances; milvus::engine::VectorsData qxb; BuildVectors(qb, 0, qxb); - stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, json_params, qxb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_NE(result_ids.size() / topk, qb); @@ -1138,20 +1141,22 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) { result_ids.clear(); result_distances.clear(); - stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, json_params, qxb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids.size(), 0); db_->Flush(); result_ids.clear(); result_distances.clear(); - stat = db_->Query(dummy_context_, table_info.table_id_, {}, topk, json_params, qxb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids.size() / topk, qb); } TEST_F(DBTestWALRecovery_Error, RECOVERY_WITH_INVALID_LOG_FILE) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -1159,7 +1164,7 @@ TEST_F(DBTestWALRecovery_Error, RECOVERY_WITH_INVALID_LOG_FILE) { milvus::engine::VectorsData qxb; BuildVectors(qb, 0, qxb); - stat = db_->InsertVectors(table_info.table_id_, "", qxb); + stat = db_->InsertVectors(table_info.collection_id_, "", qxb); ASSERT_TRUE(stat.ok()); fiu_init(0); @@ -1185,7 +1190,7 @@ TEST_F(DBTest2, GET_VECTOR_NON_EXISTING_TABLE) { } TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); @@ -1195,13 +1200,13 @@ TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) { std::string partition_name = "part_name"; 
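// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the WAL-era write path that the
// DBTestWAL / DBTestWALRecovery cases above exercise, restated against the
// renamed interface from DB.h in this diff. With wal_enable_ on, InsertVectors
// lands in the write-ahead log and vectors only become searchable once Flush()
// applies it. The "wal_demo" name and helper are illustrative.
// ---------------------------------------------------------------------------
milvus::Status
InsertAndFlushSketch(const std::shared_ptr<milvus::engine::DB>& db) {
    milvus::engine::meta::CollectionSchema schema;
    schema.collection_id_ = "wal_demo";  // hypothetical collection name
    schema.dimension_ = 256;
    auto stat = db->CreateTable(schema);
    if (!stat.ok()) {
        return stat;
    }
    milvus::engine::VectorsData batch;
    batch.vector_count_ = 100;
    batch.float_data_.resize(batch.vector_count_ * schema.dimension_, 0.5f);
    stat = db->InsertVectors(schema.collection_id_, "", batch);  // default partition
    if (!stat.ok()) {
        return stat;
    }
    return db->Flush(schema.collection_id_);  // make the batch searchable
}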
std::string partition_tag = "part_tag"; - stat = db_->CreatePartition(table_info.table_id_, partition_name, partition_tag); + stat = db_->CreatePartition(table_info.collection_id_, partition_name, partition_tag); ASSERT_TRUE(stat.ok()); - stat = db_->InsertVectors(table_info.table_id_, partition_tag, qxb); + stat = db_->InsertVectors(table_info.collection_id_, partition_tag, qxb); ASSERT_TRUE(stat.ok()); - db_->Flush(table_info.table_id_); + db_->Flush(table_info.collection_id_); milvus::engine::VectorsData vector_data; stat = db_->GetVectorByID(TABLE_NAME, qxb.id_array_[0], vector_data); @@ -1215,7 +1220,7 @@ TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) { } TEST_F(DBTest2, GET_VECTOR_IDS_TEST) { - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); auto stat = db_->CreateTable(table_schema); ASSERT_TRUE(stat.ok()); @@ -1274,7 +1279,7 @@ TEST_F(DBTest2, INSERT_DUPLICATE_ID) { options.wal_enable_ = false; db_ = milvus::engine::DBFactory::Build(options); - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); auto stat = db_->CreateTable(table_schema); ASSERT_TRUE(stat.ok()); @@ -1295,7 +1300,7 @@ TEST_F(DBTest2, INSERT_DUPLICATE_ID) { /* TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); // table_info.index_file_size_ = 1 * milvus::engine::M; auto stat = db_->CreateTable(table_info); @@ -1324,10 +1329,10 @@ TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) { milvus::engine::TableIndex index; // index.metric_type_ = (int)milvus::engine::MetricType::IP; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); - stat = db_->PreloadTable(table_info.table_id_); + stat = db_->PreloadTable(table_info.collection_id_); ASSERT_TRUE(stat.ok()); int topk = 10, nprobe = 10; @@ -1339,20 +1344,20 @@ TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) { milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, id, result_ids, + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids[0], id); ASSERT_LT(result_distances[0], 1e-4); } - db_->DropIndex(table_info.table_id_); + db_->DropIndex(table_info.collection_id_); index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); - stat = db_->PreloadTable(table_info.table_id_); + stat = db_->PreloadTable(table_info.collection_id_); ASSERT_TRUE(stat.ok()); for (auto id : ids_to_search) { @@ -1361,7 +1366,7 @@ result_distances); milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, id, result_ids, + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids[0], id); diff --git a/core/unittest/db/test_db_mysql.cpp 
b/core/unittest/db/test_db_mysql.cpp index 0a5a8ce84c..007e549abe 100644 --- a/core/unittest/db/test_db_mysql.cpp +++ b/core/unittest/db/test_db_mysql.cpp @@ -29,11 +29,11 @@ static constexpr int64_t TABLE_DIM = 256; static constexpr int64_t VECTOR_COUNT = 25000; static constexpr int64_t INSERT_LOOP = 1000; -milvus::engine::meta::TableSchema +milvus::engine::meta::CollectionSchema BuildTableSchema() { - milvus::engine::meta::TableSchema table_info; + milvus::engine::meta::CollectionSchema table_info; table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = TABLE_NAME; + table_info.collection_id_ = TABLE_NAME; table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP; return table_info; } @@ -55,11 +55,11 @@ BuildVectors(uint64_t n, uint64_t batch_index, milvus::engine::VectorsData& vect } // namespace TEST_F(MySqlDBTest, DB_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -140,11 +140,11 @@ TEST_F(MySqlDBTest, DB_TEST) { } TEST_F(MySqlDBTest, SEARCH_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -196,15 +196,15 @@ TEST_F(MySqlDBTest, SEARCH_TEST) { } TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - std::vector table_schema_array; + std::vector table_schema_array; stat = db_->AllTables(table_schema_array); ASSERT_TRUE(stat.ok()); bool bfound = false; for (auto& schema : table_schema_array) { - if (schema.table_id_ == TABLE_NAME) { + if (schema.collection_id_ == TABLE_NAME) { bfound = true; break; } @@ -222,8 +222,8 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) { fiu_disable("MySQLMetaImpl.AllTable.null_connection"); fiu_disable("MySQLMetaImpl.AllTable.throw_exception"); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -263,12 +263,12 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) { } TEST_F(MySqlDBTest, DELETE_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); // std::cout << stat.ToString() << std::endl; - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = TABLE_NAME; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = TABLE_NAME; stat = 
db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); @@ -305,23 +305,23 @@ TEST_F(MySqlDBTest, DELETE_TEST) { } TEST_F(MySqlDBTest, PARTITION_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); // create partition and insert data const int64_t PARTITION_COUNT = 5; const int64_t INSERT_BATCH = 2000; - std::string table_name = TABLE_NAME; + std::string collection_name = TABLE_NAME; for (int64_t i = 0; i < PARTITION_COUNT; i++) { std::string partition_tag = std::to_string(i); - std::string partition_name = table_name + "_" + partition_tag; - stat = db_->CreatePartition(table_name, partition_name, partition_tag); + std::string partition_name = collection_name + "_" + partition_tag; + stat = db_->CreatePartition(collection_name, partition_name, partition_tag); ASSERT_TRUE(stat.ok()); fiu_init(0); FIU_ENABLE_FIU("MySQLMetaImpl.CreatePartition.aleady_exist"); - stat = db_->CreatePartition(table_name, partition_name, partition_tag); + stat = db_->CreatePartition(collection_name, partition_name, partition_tag); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.CreatePartition.aleady_exist"); @@ -330,7 +330,7 @@ TEST_F(MySqlDBTest, PARTITION_TEST) { ASSERT_FALSE(stat.ok()); // not allow duplicated partition - stat = db_->CreatePartition(table_name, partition_name, partition_tag); + stat = db_->CreatePartition(collection_name, partition_name, partition_tag); ASSERT_FALSE(stat.ok()); milvus::engine::IDNumbers vector_ids; @@ -342,27 +342,27 @@ TEST_F(MySqlDBTest, PARTITION_TEST) { milvus::engine::VectorsData xb; BuildVectors(INSERT_BATCH, i, xb); - db_->InsertVectors(table_name, partition_tag, xb); + db_->InsertVectors(collection_name, partition_tag, xb); ASSERT_EQ(vector_ids.size(), INSERT_BATCH); } // duplicated partition is not allowed - stat = db_->CreatePartition(table_name, "", "0"); + stat = db_->CreatePartition(collection_name, "", "0"); ASSERT_FALSE(stat.ok()); - std::vector partition_schema_array; - stat = db_->ShowPartitions(table_name, partition_schema_array); + std::vector partition_schema_array; + stat = db_->ShowPartitions(collection_name, partition_schema_array); ASSERT_TRUE(stat.ok()); ASSERT_EQ(partition_schema_array.size(), PARTITION_COUNT); for (int64_t i = 0; i < PARTITION_COUNT; i++) { - ASSERT_EQ(partition_schema_array[i].table_id_, table_name + "_" + std::to_string(i)); + ASSERT_EQ(partition_schema_array[i].collection_id_, collection_name + "_" + std::to_string(i)); } { // build index milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT; index.metric_type_ = (int)milvus::engine::MetricType::L2; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); uint64_t row_count = 0; @@ -387,7 +387,7 @@ TEST_F(MySqlDBTest, PARTITION_TEST) { ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids.size() / topk, nq); - // search in whole table + // search in whole collection tags.clear(); result_ids.clear(); result_distances.clear(); @@ -407,79 +407,79 @@ TEST_F(MySqlDBTest, PARTITION_TEST) { fiu_init(0); { //create partition with dummy name - stat = db_->CreatePartition(table_name, "", "6"); + stat = db_->CreatePartition(collection_name, "", "6"); ASSERT_TRUE(stat.ok()); // ensure DescribeTable failed FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception"); - stat = 
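// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the partition convention that
// PARTITION_TEST above builds on — one partition per tag, named
// "<collection>_<tag>", with duplicate tags rejected. The element type of the
// ShowPartitions output vector is inferred from the DB.h signature and the
// partition_schema_array[i].collection_id_ access in the hunks above.
// ---------------------------------------------------------------------------
milvus::Status
CreateTaggedPartitions(const std::shared_ptr<milvus::engine::DB>& db,
                       const std::string& collection_name, int64_t count) {
    for (int64_t i = 0; i < count; ++i) {
        std::string tag = std::to_string(i);
        auto stat = db->CreatePartition(collection_name, collection_name + "_" + tag, tag);
        if (!stat.ok()) {
            return stat;  // a duplicate tag or partition name fails here
        }
    }
    std::vector<milvus::engine::meta::CollectionSchema> partitions;
    return db->ShowPartitions(collection_name, partitions);  // expect `count` entries
}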
db_->CreatePartition(table_name, "", "7"); + stat = db_->CreatePartition(collection_name, "", "7"); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception"); - //Drop partition will failed,since it firstly drop partition meta table. + //Drop partition will failed,since it firstly drop partition meta collection. FIU_ENABLE_FIU("MySQLMetaImpl.DropTable.null_connection"); - stat = db_->DropPartition(table_name + "_5"); + stat = db_->DropPartition(collection_name + "_5"); //TODO(sjh): add assert expr, since DropPartion always return Status::OK() for now. //ASSERT_TRUE(stat.ok()); fiu_disable("MySQLMetaImpl.DropTable.null_connection"); - std::vector partition_schema_array; - stat = db_->ShowPartitions(table_name, partition_schema_array); + std::vector partition_schema_array; + stat = db_->ShowPartitions(collection_name, partition_schema_array); ASSERT_TRUE(stat.ok()); ASSERT_EQ(partition_schema_array.size(), PARTITION_COUNT + 1); FIU_ENABLE_FIU("MySQLMetaImpl.ShowPartitions.null_connection"); - stat = db_->ShowPartitions(table_name, partition_schema_array); + stat = db_->ShowPartitions(collection_name, partition_schema_array); ASSERT_FALSE(stat.ok()); FIU_ENABLE_FIU("MySQLMetaImpl.ShowPartitions.throw_exception"); - stat = db_->ShowPartitions(table_name, partition_schema_array); + stat = db_->ShowPartitions(collection_name, partition_schema_array); ASSERT_FALSE(stat.ok()); FIU_ENABLE_FIU("MySQLMetaImpl.DropTable.throw_exception"); - stat = db_->DropPartition(table_name + "_4"); + stat = db_->DropPartition(collection_name + "_4"); fiu_disable("MySQLMetaImpl.DropTable.throw_exception"); - stat = db_->DropPartition(table_name + "_0"); + stat = db_->DropPartition(collection_name + "_0"); ASSERT_TRUE(stat.ok()); } { FIU_ENABLE_FIU("MySQLMetaImpl.GetPartitionName.null_connection"); - stat = db_->DropPartitionByTag(table_name, "1"); + stat = db_->DropPartitionByTag(collection_name, "1"); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.GetPartitionName.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.GetPartitionName.throw_exception"); - stat = db_->DropPartitionByTag(table_name, "1"); + stat = db_->DropPartitionByTag(collection_name, "1"); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.GetPartitionName.throw_exception"); - stat = db_->DropPartitionByTag(table_name, "1"); + stat = db_->DropPartitionByTag(collection_name, "1"); ASSERT_TRUE(stat.ok()); - stat = db_->CreatePartition(table_name, table_name + "_1", "1"); + stat = db_->CreatePartition(collection_name, collection_name + "_1", "1"); FIU_ENABLE_FIU("MySQLMetaImpl.DeleteTableFiles.null_connection"); - stat = db_->DropPartition(table_name + "_1"); + stat = db_->DropPartition(collection_name + "_1"); fiu_disable("MySQLMetaImpl.DeleteTableFiles.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.DeleteTableFiles.throw_exception"); - stat = db_->DropPartition(table_name + "_1"); + stat = db_->DropPartition(collection_name + "_1"); fiu_disable("MySQLMetaImpl.DeleteTableFiles.throw_exception"); } { FIU_ENABLE_FIU("MySQLMetaImpl.DropTableIndex.null_connection"); - stat = db_->DropIndex(table_name); + stat = db_->DropIndex(collection_name); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.DropTableIndex.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.DropTableIndex.throw_exception"); - stat = db_->DropIndex(table_name); + stat = db_->DropIndex(collection_name); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.DropTableIndex.throw_exception"); - stat = db_->DropIndex(table_name); + stat = 
db_->DropIndex(collection_name); ASSERT_TRUE(stat.ok()); } } diff --git a/core/unittest/db/test_delete.cpp b/core/unittest/db/test_delete.cpp index 97c91fab64..651f044d55 100644 --- a/core/unittest/db/test_delete.cpp +++ b/core/unittest/db/test_delete.cpp @@ -37,15 +37,15 @@ std::string GetTableName() { auto now = std::chrono::system_clock::now(); auto micros = std::chrono::duration_cast(now.time_since_epoch()).count(); - static std::string table_name = std::to_string(micros); - return table_name; + static std::string collection_name = std::to_string(micros); + return collection_name; } -milvus::engine::meta::TableSchema +milvus::engine::meta::CollectionSchema BuildTableSchema() { - milvus::engine::meta::TableSchema table_info; + milvus::engine::meta::CollectionSchema table_info; table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = GetTableName(); + table_info.collection_id_ = GetTableName(); table_info.metric_type_ = (int32_t)milvus::engine::MetricType::L2; table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP; return table_info; @@ -64,11 +64,11 @@ BuildVectors(uint64_t n, milvus::engine::VectorsData& vectors) { } // namespace TEST_F(DeleteTest, delete_in_mem) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -81,7 +81,7 @@ TEST_F(DeleteTest, delete_in_mem) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -105,7 +105,7 @@ TEST_F(DeleteTest, delete_in_mem) { ids_to_delete.emplace_back(kv.first); } - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); ASSERT_TRUE(stat.ok()); // std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk @@ -113,7 +113,7 @@ TEST_F(DeleteTest, delete_in_mem) { ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - search_vectors.size()); @@ -124,8 +124,8 @@ TEST_F(DeleteTest, delete_in_mem) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, - result_distances); + stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, + {{"nprobe", nprobe}}, search, result_ids, result_distances); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); ASSERT_GT(result_distances[0], 1); @@ -133,11 +133,11 @@ TEST_F(DeleteTest, delete_in_mem) { } TEST_F(DeleteTest, delete_on_disk) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = 
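// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the fault-injection idiom the
// MySQL meta tests above repeat — each call is probed under two failure
// modes, a dropped connection and a thrown exception. A small helper makes
// the pattern explicit; the helper itself is hypothetical, while the FIU
// point name in the usage note appears verbatim above.
// ---------------------------------------------------------------------------
template <typename Call>
void
ExpectFailsUnderFiu(const char* fiu_point, Call&& call) {
    FIU_ENABLE_FIU(fiu_point);
    ASSERT_FALSE(call().ok());  // the armed fault must surface as a bad Status
    fiu_disable(fiu_point);
}
// Usage, mirroring the DropIndex block above:
//   ExpectFailsUnderFiu("MySQLMetaImpl.DropTableIndex.null_connection",
//                       [&] { return db_->DropIndex(collection_name); });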
table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -150,7 +150,7 @@ TEST_F(DeleteTest, delete_on_disk) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -174,7 +174,7 @@ TEST_F(DeleteTest, delete_on_disk) { ASSERT_TRUE(stat.ok()); for (auto& kv : search_vectors) { - stat = db_->DeleteVector(table_info.table_id_, kv.first); + stat = db_->DeleteVector(table_info.collection_id_, kv.first); ASSERT_TRUE(stat.ok()); } @@ -182,7 +182,7 @@ TEST_F(DeleteTest, delete_on_disk) { ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - search_vectors.size()); @@ -193,8 +193,8 @@ TEST_F(DeleteTest, delete_on_disk) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, - result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); ASSERT_GT(result_distances[0], 1); @@ -202,11 +202,11 @@ TEST_F(DeleteTest, delete_on_disk) { } TEST_F(DeleteTest, delete_multiple_times) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -219,7 +219,7 @@ TEST_F(DeleteTest, delete_multiple_times) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -245,7 +245,7 @@ TEST_F(DeleteTest, delete_multiple_times) { int topk = 10, nprobe = 10; for (auto& pair : search_vectors) { std::vector to_delete{pair.first}; - stat = db_->DeleteVectors(table_info.table_id_, to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, to_delete); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); @@ -256,8 +256,8 @@ TEST_F(DeleteTest, delete_multiple_times) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, - result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); ASSERT_GT(result_distances[0], 1); @@ -265,12 +265,12 @@ TEST_F(DeleteTest, delete_multiple_times) { } TEST_F(DeleteTest, delete_before_create_index) { - 
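// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the delete-then-verify loop the
// DeleteTest cases share — deletes only take effect at Flush(), after which
// GetTableRowCount must reflect them. The helper name and DB_ERROR wrapping
// are illustrative.
// ---------------------------------------------------------------------------
milvus::Status
DeleteAndVerify(const std::shared_ptr<milvus::engine::DB>& db,
                const std::string& collection_id, const milvus::engine::IDNumbers& ids,
                uint64_t expected_rows) {
    auto stat = db->DeleteVectors(collection_id, ids);
    if (!stat.ok()) {
        return stat;
    }
    stat = db->Flush(collection_id);  // apply the pending deletes
    if (!stat.ok()) {
        return stat;
    }
    uint64_t row_count = 0;
    stat = db->GetTableRowCount(collection_id, row_count);
    if (stat.ok() && row_count != expected_rows) {
        return milvus::Status(milvus::DB_ERROR, "unexpected row count after delete");
    }
    return stat;
}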
milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); table_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFFLAT; auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -283,7 +283,7 @@ TEST_F(DeleteTest, delete_before_create_index) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); @@ -309,7 +309,7 @@ TEST_F(DeleteTest, delete_before_create_index) { for (auto& kv : search_vectors) { ids_to_delete.emplace_back(kv.first); } - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); stat = db_->Flush(); ASSERT_TRUE(stat.ok()); @@ -317,11 +317,11 @@ TEST_F(DeleteTest, delete_before_create_index) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; index.extra_params_ = {{"nlist", 100}}; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - ids_to_delete.size()); @@ -332,8 +332,8 @@ TEST_F(DeleteTest, delete_before_create_index) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, - result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); ASSERT_GT(result_distances[0], 1); @@ -341,12 +341,12 @@ TEST_F(DeleteTest, delete_before_create_index) { } TEST_F(DeleteTest, delete_with_index) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); table_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFFLAT; auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -359,7 +359,7 @@ TEST_F(DeleteTest, delete_with_index) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -381,7 +381,7 @@ TEST_F(DeleteTest, delete_with_index) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; index.extra_params_ = {{"nlist", 100}}; - stat = db_->CreateIndex(table_info.table_id_, 
index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); // std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk @@ -392,13 +392,13 @@ TEST_F(DeleteTest, delete_with_index) { for (auto& kv : search_vectors) { ids_to_delete.emplace_back(kv.first); } - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); stat = db_->Flush(); ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - ids_to_delete.size()); @@ -409,8 +409,8 @@ TEST_F(DeleteTest, delete_with_index) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, - result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); ASSERT_GT(result_distances[0], 1); @@ -418,11 +418,11 @@ TEST_F(DeleteTest, delete_with_index) { } TEST_F(DeleteTest, delete_multiple_times_with_index) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -435,7 +435,7 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -461,14 +461,14 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT; index.extra_params_ = {{"nlist", 1}}; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); int topk = 10, nprobe = 10; int deleted = 0; for (auto& pair : search_vectors) { std::vector to_delete{pair.first}; - stat = db_->DeleteVectors(table_info.table_id_, to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, to_delete); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); @@ -477,7 +477,7 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) { ++deleted; uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - deleted); @@ -486,8 +486,8 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, - result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, {{"nprobe", 
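// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the two IVF knobs split across
// these hunks — nlist is fixed when the index is built, nprobe is supplied
// per query through extra_params. Values mirror the tests above; the snippet
// reuses the surrounding test's locals (db_, table_info, xb, stat, ...).
// ---------------------------------------------------------------------------
milvus::engine::TableIndex ivf_index;
ivf_index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
ivf_index.extra_params_ = {{"nlist", 100}};  // bucket count, build-time
stat = db_->CreateIndex(table_info.collection_id_, ivf_index);
ASSERT_TRUE(stat.ok());

milvus::json query_params = {{"nprobe", 10}};  // buckets probed, query-time
stat = db_->Query(dummy_context_, table_info.collection_id_, {}, 10,
                  query_params, xb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());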
nprobe}}, search, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); @@ -496,11 +496,11 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) { } TEST_F(DeleteTest, delete_single_vector) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -509,21 +509,21 @@ TEST_F(DeleteTest, delete_single_vector) { milvus::engine::VectorsData xb; BuildVectors(nb, xb); - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); // std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk stat = db_->Flush(); ASSERT_TRUE(stat.ok()); - stat = db_->DeleteVectors(table_info.table_id_, xb.id_array_); + stat = db_->DeleteVectors(table_info.collection_id_, xb.id_array_); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, 0); @@ -533,7 +533,8 @@ TEST_F(DeleteTest, delete_single_vector) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, xb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, json_params, xb, result_ids, result_distances); ASSERT_TRUE(result_ids.empty()); ASSERT_TRUE(result_distances.empty()); // ASSERT_EQ(result_ids[0], -1); @@ -542,11 +543,11 @@ TEST_F(DeleteTest, delete_single_vector) { } TEST_F(DeleteTest, delete_add_create_index) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -555,7 +556,7 @@ TEST_F(DeleteTest, delete_add_create_index) { milvus::engine::VectorsData xb; BuildVectors(nb, xb); - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); // stat = db_->Flush(); @@ -563,27 +564,27 @@ TEST_F(DeleteTest, delete_add_create_index) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; index.extra_params_ = {{"nlist", 100}}; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); std::vector ids_to_delete; ids_to_delete.emplace_back(xb.id_array_.front()); - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = 
db_->DeleteVectors(table_info.collection_id_, ids_to_delete); ASSERT_TRUE(stat.ok()); milvus::engine::VectorsData xb2 = xb; xb2.id_array_.clear(); // same vector, different id - stat = db_->InsertVectors(table_info.table_id_, "", xb2); + stat = db_->InsertVectors(table_info.collection_id_, "", xb2); ASSERT_TRUE(stat.ok()); // stat = db_->Flush(); // ASSERT_TRUE(stat.ok()); - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb * 2 - 1); @@ -597,25 +598,26 @@ TEST_F(DeleteTest, delete_add_create_index) { qb.float_data_.resize(TABLE_DIM); qb.vector_count_ = 1; qb.id_array_.clear(); - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, qb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, json_params, qb, result_ids, result_distances); ASSERT_EQ(result_ids[0], xb2.id_array_.front()); ASSERT_LT(result_distances[0], 1e-4); result_ids.clear(); result_distances.clear(); - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, ids_to_delete.front(), + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, ids_to_delete.front(), result_ids, result_distances); ASSERT_EQ(result_ids[0], -1); ASSERT_EQ(result_distances[0], std::numeric_limits::max()); } TEST_F(DeleteTest, delete_add_auto_flush) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -624,7 +626,7 @@ TEST_F(DeleteTest, delete_add_auto_flush) { milvus::engine::VectorsData xb; BuildVectors(nb, xb); - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -633,28 +635,28 @@ TEST_F(DeleteTest, delete_add_auto_flush) { // ASSERT_TRUE(stat.ok()); // milvus::engine::TableIndex index; // index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; - // stat = db_->CreateIndex(table_info.table_id_, index); + // stat = db_->CreateIndex(table_info.collection_id_, index); // ASSERT_TRUE(stat.ok()); std::vector ids_to_delete; ids_to_delete.emplace_back(xb.id_array_.front()); - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); ASSERT_TRUE(stat.ok()); milvus::engine::VectorsData xb2 = xb; xb2.id_array_.clear(); // same vector, different id - stat = db_->InsertVectors(table_info.table_id_, "", xb2); + stat = db_->InsertVectors(table_info.collection_id_, "", xb2); ASSERT_TRUE(stat.ok()); std::this_thread::sleep_for(std::chrono::seconds(2)); // stat = db_->Flush(); // ASSERT_TRUE(stat.ok()); - // stat = db_->CreateIndex(table_info.table_id_, index); + // stat = db_->CreateIndex(table_info.collection_id_, index); // 
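// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the "same vector, different id"
// idiom used just above — clearing id_array_ before InsertVectors asks the
// engine to auto-assign fresh ids, so the same payload can be re-inserted
// without colliding with the deleted ids. Reuses the test's locals.
// ---------------------------------------------------------------------------
milvus::engine::VectorsData reinsert = xb;  // copy the float payload
reinsert.id_array_.clear();                 // empty ids => engine assigns new ones
stat = db_->InsertVectors(table_info.collection_id_, "", reinsert);
ASSERT_TRUE(stat.ok());
// reinsert.id_array_ now holds the newly assigned ids; the deleted ids keep
// resolving to -1 / max-distance through QueryByID, as asserted above.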
ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb * 2 - 1); @@ -668,25 +670,27 @@ TEST_F(DeleteTest, delete_add_auto_flush) { qb.float_data_.resize(TABLE_DIM); qb.vector_count_ = 1; qb.id_array_.clear(); - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, qb, result_ids, result_distances); + stat = db_->Query(dummy_context_, + table_info.collection_id_, tags, topk, json_params, qb, result_ids, result_distances); ASSERT_EQ(result_ids[0], xb2.id_array_.front()); ASSERT_LT(result_distances[0], 1e-4); result_ids.clear(); result_distances.clear(); - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, {{"nprobe", nprobe}}, ids_to_delete.front(), - result_ids, result_distances); + stat = db_->QueryByID(dummy_context_, + table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, + ids_to_delete.front(), result_ids, result_distances); ASSERT_EQ(result_ids[0], -1); ASSERT_EQ(result_distances[0], std::numeric_limits::max()); } TEST_F(CompactTest, compact_basic) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -695,7 +699,7 @@ TEST_F(CompactTest, compact_basic) { milvus::engine::VectorsData xb; BuildVectors(nb, xb); - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); @@ -704,18 +708,18 @@ TEST_F(CompactTest, compact_basic) { std::vector ids_to_delete; ids_to_delete.emplace_back(xb.id_array_.front()); ids_to_delete.emplace_back(xb.id_array_.back()); - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - 2); - stat = db_->Compact(table_info.table_id_); + stat = db_->Compact(table_info.collection_id_); ASSERT_TRUE(stat.ok()); const int topk = 1, nprobe = 1; @@ -727,7 +731,7 @@ TEST_F(CompactTest, compact_basic) { milvus::engine::VectorsData qb = xb; for (auto& id : ids_to_delete) { - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, id, result_ids, + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids, result_distances); ASSERT_EQ(result_ids[0], -1); ASSERT_EQ(result_distances[0], std::numeric_limits::max()); @@ -735,13 +739,13 @@ TEST_F(CompactTest, compact_basic) { } TEST_F(CompactTest, compact_with_index) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); table_info.index_file_size_ = milvus::engine::ONE_KB; table_info.engine_type_ = 
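// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the invariant compact_basic
// checks — Compact() reclaims space left by flushed deletes but must not
// change the visible row count. Reuses the surrounding test's locals.
// ---------------------------------------------------------------------------
uint64_t rows_before = 0, rows_after = 0;
stat = db_->GetTableRowCount(table_info.collection_id_, rows_before);
ASSERT_TRUE(stat.ok());
stat = db_->Compact(table_info.collection_id_);
ASSERT_TRUE(stat.ok());
stat = db_->GetTableRowCount(table_info.collection_id_, rows_after);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(rows_before, rows_after);  // compaction drops no live vectors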
(int32_t)milvus::engine::EngineType::FAISS_IVFSQ8; auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -755,7 +759,7 @@ TEST_F(CompactTest, compact_with_index) { xb.id_array_.emplace_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -776,7 +780,7 @@ TEST_F(CompactTest, compact_with_index) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); stat = db_->Flush(); @@ -786,25 +790,25 @@ TEST_F(CompactTest, compact_with_index) { for (auto& kv : search_vectors) { ids_to_delete.emplace_back(kv.first); } - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); stat = db_->Flush(); ASSERT_TRUE(stat.ok()); uint64_t row_count; - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - ids_to_delete.size()); - stat = db_->Compact(table_info.table_id_); + stat = db_->Compact(table_info.collection_id_); ASSERT_TRUE(stat.ok()); - stat = db_->GetTableRowCount(table_info.table_id_, row_count); + stat = db_->GetTableRowCount(table_info.collection_id_, row_count); ASSERT_TRUE(stat.ok()); ASSERT_EQ(row_count, nb - ids_to_delete.size()); milvus::engine::TableIndex table_index; - stat = db_->DescribeIndex(table_info.table_id_, table_index); + stat = db_->DescribeIndex(table_info.collection_id_, table_index); ASSERT_TRUE(stat.ok()); ASSERT_FLOAT_EQ(table_index.engine_type_, index.engine_type_); @@ -817,7 +821,7 @@ TEST_F(CompactTest, compact_with_index) { std::vector tags; milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, search, result_ids, + stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, search, result_ids, result_distances); ASSERT_NE(result_ids[0], pair.first); // ASSERT_LT(result_distances[0], 1e-4); diff --git a/core/unittest/db/test_mem.cpp b/core/unittest/db/test_mem.cpp index ebd9fcf548..083142eac5 100644 --- a/core/unittest/db/test_mem.cpp +++ b/core/unittest/db/test_mem.cpp @@ -39,15 +39,15 @@ std::string GetTableName() { auto now = std::chrono::system_clock::now(); auto micros = std::chrono::duration_cast(now.time_since_epoch()).count(); - static std::string table_name = std::to_string(micros); - return table_name; + static std::string collection_name = std::to_string(micros); + return collection_name; } -milvus::engine::meta::TableSchema +milvus::engine::meta::CollectionSchema BuildTableSchema() { - milvus::engine::meta::TableSchema table_info; + milvus::engine::meta::CollectionSchema table_info; table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = GetTableName(); + table_info.collection_id_ = GetTableName(); table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP; return 
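// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): compact_with_index additionally
// checks that compaction preserves index metadata — DescribeIndex should
// round-trip what CreateIndex installed. Restated in isolation below, reusing
// the test's locals; note a plain ASSERT_EQ suffices for the integer
// engine_type_ field that the test above compares with ASSERT_FLOAT_EQ.
// ---------------------------------------------------------------------------
milvus::engine::TableIndex described;
stat = db_->DescribeIndex(table_info.collection_id_, described);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(described.engine_type_, index.engine_type_);  // survives Compact()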
table_info; } @@ -65,12 +65,12 @@ BuildVectors(uint64_t n, milvus::engine::VectorsData& vectors) { } // namespace TEST_F(MemManagerTest, VECTOR_SOURCE_TEST) { - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); auto status = impl_->CreateTable(table_schema); ASSERT_TRUE(status.ok()); - milvus::engine::meta::TableFileSchema table_file_schema; - table_file_schema.table_id_ = GetTableName(); + milvus::engine::meta::SegmentSchema table_file_schema; + table_file_schema.collection_id_ = GetTableName(); status = impl_->CreateTableFile(table_file_schema); ASSERT_TRUE(status.ok()); @@ -113,7 +113,7 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) { auto options = GetOptions(); fiu_init(0); - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); auto status = impl_->CreateTable(table_schema); ASSERT_TRUE(status.ok()); @@ -150,7 +150,7 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) { ASSERT_TRUE(status.ok()); { - //test fail create table file + //test fail create collection file FIU_ENABLE_FIU("SqliteMetaImpl.CreateTableFile.throw_exception"); milvus::engine::MemTableFile mem_table_file_1(GetTableName(), impl_, options); fiu_disable("SqliteMetaImpl.CreateTableFile.throw_exception"); @@ -162,8 +162,8 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) { { options.insert_cache_immediately_ = true; - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); - table_schema.table_id_ = "faiss_pq"; + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); + table_schema.collection_id_ = "faiss_pq"; table_schema.engine_type_ = (int)milvus::engine::EngineType::FAISS_PQ; auto status = impl_->CreateTable(table_schema); ASSERT_TRUE(status.ok()); @@ -176,7 +176,7 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) { TEST_F(MemManagerTest, MEM_TABLE_TEST) { auto options = GetOptions(); - milvus::engine::meta::TableSchema table_schema = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema(); auto status = impl_->CreateTable(table_schema); ASSERT_TRUE(status.ok()); @@ -245,11 +245,11 @@ TEST_F(MemManagerTest, MEM_TABLE_TEST) { } TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = GetTableName(); + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = GetTableName(); stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -302,11 +302,11 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) { } TEST_F(MemManagerTest2, INSERT_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = GetTableName(); + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = GetTableName(); stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -328,16 +328,16 @@ TEST_F(MemManagerTest2, INSERT_TEST) { } TEST_F(MemManagerTest2, 
INSERT_BINARY_TEST) { - milvus::engine::meta::TableSchema table_info; + milvus::engine::meta::CollectionSchema table_info; table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = GetTableName(); + table_info.collection_id_ = GetTableName(); table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP; table_info.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD; auto stat = db_->CreateTable(table_info); ASSERT_TRUE(stat.ok()); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = GetTableName(); + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = GetTableName(); stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -363,11 +363,11 @@ TEST_F(MemManagerTest2, INSERT_BINARY_TEST) { } } // TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { -// milvus::engine::meta::TableSchema table_info = BuildTableSchema(); +// milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); // auto stat = db_->CreateTable(table_info); // -// milvus::engine::meta::TableSchema table_info_get; -// table_info_get.table_id_ = GetTableName(); +// milvus::engine::meta::CollectionSchema table_info_get; +// table_info_get.collection_id_ = GetTableName(); // stat = db_->DescribeTable(table_info_get); // ASSERT_TRUE(stat.ok()); // ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -439,11 +439,11 @@ TEST_F(MemManagerTest2, INSERT_BINARY_TEST) { //} TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = GetTableName(); + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = GetTableName(); stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); diff --git a/core/unittest/db/test_meta.cpp b/core/unittest/db/test_meta.cpp index 5b34f1ced3..00a33c5b9a 100644 --- a/core/unittest/db/test_meta.cpp +++ b/core/unittest/db/test_meta.cpp @@ -25,45 +25,45 @@ #include "src/db/OngoingFileChecker.h" TEST_F(MetaTest, TABLE_TEST) { - auto table_id = "meta_test_table"; + auto collection_id = "meta_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl_->CreateTable(collection); ASSERT_TRUE(status.ok()); - auto gid = table.id_; - table.id_ = -1; - status = impl_->DescribeTable(table); + auto gid = collection.id_; + collection.id_ = -1; + status = impl_->DescribeTable(collection); ASSERT_TRUE(status.ok()); - ASSERT_EQ(table.id_, gid); - ASSERT_EQ(table.table_id_, table_id); + ASSERT_EQ(collection.id_, gid); + ASSERT_EQ(collection.collection_id_, collection_id); - table.table_id_ = "not_found"; - status = impl_->DescribeTable(table); + collection.collection_id_ = "not_found"; + status = impl_->DescribeTable(collection); ASSERT_TRUE(!status.ok()); - table.table_id_ = table_id; - status = impl_->CreateTable(table); + collection.collection_id_ = collection_id; + status = impl_->CreateTable(collection); ASSERT_EQ(status.code(), milvus::DB_ALREADY_EXIST); - status = impl_->DropTable(table.table_id_); + status = 
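// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the binary-vector setup from
// INSERT_BINARY_TEST above. A FAISS_BIN_IDMAP engine with a JACCARD metric
// stores packed bits (8 dimensions per byte), so insert payloads go through
// the engine's binary buffer rather than float_data_ — an assumption, since
// these hunks only show the schema side. "binary_demo" is illustrative.
// ---------------------------------------------------------------------------
milvus::engine::meta::CollectionSchema binary_schema;
binary_schema.collection_id_ = "binary_demo";  // hypothetical name
binary_schema.dimension_ = 256;                // 256 bits => 32 bytes/vector
binary_schema.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP;
binary_schema.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD;
stat = db_->CreateTable(binary_schema);
ASSERT_TRUE(stat.ok());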
impl_->DropTable(collection.collection_id_); ASSERT_TRUE(status.ok()); - status = impl_->CreateTable(table); + status = impl_->CreateTable(collection); ASSERT_EQ(status.code(), milvus::DB_ERROR); - table.table_id_ = ""; - status = impl_->CreateTable(table); + collection.collection_id_ = ""; + status = impl_->CreateTable(collection); ASSERT_TRUE(status.ok()); } TEST_F(MetaTest, FALID_TEST) { fiu_init(0); auto options = GetOptions(); - auto table_id = "meta_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; + auto collection_id = "meta_test_table"; + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; milvus::Status status; { @@ -87,102 +87,102 @@ TEST_F(MetaTest, FALID_TEST) { } { FIU_ENABLE_FIU("SqliteMetaImpl.CreateTable.throw_exception"); - status = impl_->CreateTable(table); + status = impl_->CreateTable(collection); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.CreateTable.throw_exception"); FIU_ENABLE_FIU("SqliteMetaImpl.CreateTable.insert_throw_exception"); - table.table_id_ = ""; - status = impl_->CreateTable(table); + collection.collection_id_ = ""; + status = impl_->CreateTable(collection); ASSERT_FALSE(status.ok()); fiu_disable("SqliteMetaImpl.CreateTable.insert_throw_exception"); - //success create table - table.table_id_ = table_id; - status = impl_->CreateTable(table); + //success create collection + collection.collection_id_ = collection_id; + status = impl_->CreateTable(collection); ASSERT_TRUE(status.ok()); } { FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTable.throw_exception"); - status = impl_->DescribeTable(table); + status = impl_->DescribeTable(collection); ASSERT_FALSE(status.ok()); fiu_disable("SqliteMetaImpl.DescribeTable.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.HasTable.throw_exception"); bool has = false; - status = impl_->HasTable(table.table_id_, has); + status = impl_->HasTable(collection.collection_id_, has); ASSERT_FALSE(status.ok()); ASSERT_FALSE(has); fiu_disable("SqliteMetaImpl.HasTable.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.AllTables.throw_exception"); - std::vector table_schema_array; + std::vector table_schema_array; status = impl_->AllTables(table_schema_array); ASSERT_FALSE(status.ok()); fiu_disable("SqliteMetaImpl.AllTables.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.DropTable.throw_exception"); - status = impl_->DropTable(table.table_id_); + status = impl_->DropTable(collection.collection_id_); ASSERT_FALSE(status.ok()); fiu_disable("SqliteMetaImpl.DropTable.throw_exception"); } { - milvus::engine::meta::TableFileSchema schema; - schema.table_id_ = "notexist"; + milvus::engine::meta::SegmentSchema schema; + schema.collection_id_ = "notexist"; status = impl_->CreateTableFile(schema); ASSERT_FALSE(status.ok()); FIU_ENABLE_FIU("SqliteMetaImpl.CreateTableFile.throw_exception"); - schema.table_id_ = table_id; + schema.collection_id_ = collection_id; status = impl_->CreateTableFile(schema); ASSERT_FALSE(status.ok()); fiu_disable("SqliteMetaImpl.CreateTableFile.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.DeleteTableFiles.throw_exception"); - status = impl_->DeleteTableFiles(table.table_id_); + status = impl_->DeleteTableFiles(collection.collection_id_); ASSERT_FALSE(status.ok()); fiu_disable("SqliteMetaImpl.DeleteTableFiles.throw_exception"); } { - milvus::engine::meta::TableFilesSchema schemas; + milvus::engine::meta::SegmentsSchema schemas; std::vector ids; status 
= impl_->GetTableFiles("notexist", ids, schemas); ASSERT_FALSE(status.ok()); FIU_ENABLE_FIU("SqliteMetaImpl.GetTableFiles.throw_exception"); - status = impl_->GetTableFiles(table_id, ids, schemas); + status = impl_->GetTableFiles(collection_id, ids, schemas); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.GetTableFiles.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFlag.throw_exception"); - status = impl_->UpdateTableFlag(table_id, 0); + status = impl_->UpdateTableFlag(collection_id, 0); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.UpdateTableFlag.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFile.throw_exception"); - milvus::engine::meta::TableFileSchema schema; - schema.table_id_ = table_id; + milvus::engine::meta::SegmentSchema schema; + schema.collection_id_ = collection_id; status = impl_->UpdateTableFile(schema); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.UpdateTableFile.throw_exception"); schema = {}; - schema.table_id_ = "notexist"; + schema.collection_id_ = "notexist"; status = impl_->UpdateTableFile(schema); ASSERT_TRUE(status.ok()); } { - milvus::engine::meta::TableFilesSchema schemas; - milvus::engine::meta::TableFileSchema schema; - schema.table_id_ = "notexits"; + milvus::engine::meta::SegmentsSchema schemas; + milvus::engine::meta::SegmentSchema schema; + schema.collection_id_ = "notexits"; schemas.emplace_back(schema); status = impl_->UpdateTableFiles(schemas); ASSERT_TRUE(status.ok()); @@ -208,19 +208,19 @@ TEST_F(MetaTest, FALID_TEST) { fiu_disable("SqliteMetaImpl.UpdateTableIndex.throw_exception"); FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTableIndex.throw_exception"); - status = impl_->DescribeTableIndex(table_id, index); + status = impl_->DescribeTableIndex(collection_id, index); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.DescribeTableIndex.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFilesToIndex.throw_exception"); - status = impl_->UpdateTableFilesToIndex(table_id); + status = impl_->UpdateTableFilesToIndex(collection_id); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.UpdateTableFilesToIndex.throw_exception"); } { FIU_ENABLE_FIU("SqliteMetaImpl.DropTableIndex.throw_exception"); - status = impl_->DropTableIndex(table_id); + status = impl_->DropTableIndex(collection_id); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.DropTableIndex.throw_exception"); } @@ -230,54 +230,54 @@ TEST_F(MetaTest, FALID_TEST) { status = impl_->CreatePartition("notexist", partition, partition_tag, 0); ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND); - status = impl_->CreatePartition(table_id, partition, partition_tag, 0); + status = impl_->CreatePartition(collection_id, partition, partition_tag, 0); ASSERT_TRUE(status.ok()); partition_tag = "tag1"; - status = impl_->CreatePartition(table_id, partition, partition_tag, 0); + status = impl_->CreatePartition(collection_id, partition, partition_tag, 0); ASSERT_FALSE(status.ok()); //create empty name partition partition = ""; - status = impl_->CreatePartition(table_id, partition, partition_tag, 0); + status = impl_->CreatePartition(collection_id, partition, partition_tag, 0); ASSERT_TRUE(status.ok()); - std::vector partions_schema; - status = impl_->ShowPartitions(table_id, partions_schema); + std::vector 
partions_schema; + status = impl_->ShowPartitions(collection_id, partions_schema); ASSERT_TRUE(status.ok()); ASSERT_EQ(partions_schema.size(), 2); partions_schema.clear(); FIU_ENABLE_FIU("SqliteMetaImpl.ShowPartitions.throw_exception"); - status = impl_->ShowPartitions(table_id, partions_schema); + status = impl_->ShowPartitions(collection_id, partions_schema); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.ShowPartitions.throw_exception"); std::string partion; FIU_ENABLE_FIU("SqliteMetaImpl.GetPartitionName.throw_exception"); - status = impl_->GetPartitionName(table_id, "tag0", partion); + status = impl_->GetPartitionName(collection_id, "tag0", partion); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.GetPartitionName.throw_exception"); } { - milvus::engine::meta::TableFilesSchema table_files; + milvus::engine::meta::SegmentsSchema table_files; status = impl_->FilesToSearch("notexist", table_files); ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND); FIU_ENABLE_FIU("SqliteMetaImpl.FilesToSearch.throw_exception"); - status = impl_->FilesToSearch(table_id, table_files); + status = impl_->FilesToSearch(collection_id, table_files); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.FilesToSearch.throw_exception"); } { - milvus::engine::meta::TableFileSchema file; - file.table_id_ = table_id; + milvus::engine::meta::SegmentSchema file; + file.collection_id_ = collection_id; status = impl_->CreateTableFile(file); ASSERT_TRUE(status.ok()); - file.file_type_ = milvus::engine::meta::TableFileSchema::TO_INDEX; + file.file_type_ = milvus::engine::meta::SegmentSchema::TO_INDEX; impl_->UpdateTableFile(file); - milvus::engine::meta::TableFilesSchema files; + milvus::engine::meta::SegmentsSchema files; FIU_ENABLE_FIU("SqliteMetaImpl_FilesToIndex_TableNotFound"); status = impl_->FilesToIndex(files); ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND); @@ -289,11 +289,11 @@ TEST_F(MetaTest, FALID_TEST) { fiu_disable("SqliteMetaImpl.FilesToIndex.throw_exception"); } { - milvus::engine::meta::TableFilesSchema files; + milvus::engine::meta::SegmentsSchema files; std::vector file_types; - file_types.push_back(milvus::engine::meta::TableFileSchema::INDEX); + file_types.push_back(milvus::engine::meta::SegmentSchema::INDEX); FIU_ENABLE_FIU("SqliteMetaImpl.FilesByType.throw_exception"); - status = impl_->FilesByType(table_id, file_types, files); + status = impl_->FilesByType(collection_id, file_types, files); ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED); fiu_disable("SqliteMetaImpl.FilesByType.throw_exception"); } @@ -354,27 +354,27 @@ TEST_F(MetaTest, FALID_TEST) { } TEST_F(MetaTest, TABLE_FILE_TEST) { - auto table_id = "meta_test_table"; + auto collection_id = "meta_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - table.dimension_ = 256; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + collection.dimension_ = 256; + auto status = impl_->CreateTable(collection); - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; status = impl_->CreateTableFile(table_file); ASSERT_TRUE(status.ok()); - ASSERT_EQ(table_file.file_type_, milvus::engine::meta::TableFileSchema::NEW); + ASSERT_EQ(table_file.file_type_, 
milvus::engine::meta::SegmentSchema::NEW); uint64_t cnt = 0; - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_TRUE(status.ok()); ASSERT_EQ(cnt, 0UL); auto file_id = table_file.file_id_; - auto new_file_type = milvus::engine::meta::TableFileSchema::INDEX; + auto new_file_type = milvus::engine::meta::SegmentSchema::INDEX; table_file.file_type_ = new_file_type; status = impl_->UpdateTableFile(table_file); @@ -383,41 +383,41 @@ TEST_F(MetaTest, TABLE_FILE_TEST) { } TEST_F(MetaTest, TABLE_FILE_ROW_COUNT_TEST) { - auto table_id = "row_count_test_table"; + auto collection_id = "row_count_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - table.dimension_ = 256; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + collection.dimension_ = 256; + auto status = impl_->CreateTable(collection); - milvus::engine::meta::TableFileSchema table_file; + milvus::engine::meta::SegmentSchema table_file; table_file.row_count_ = 100; - table_file.table_id_ = table.table_id_; + table_file.collection_id_ = collection.collection_id_; table_file.file_type_ = 1; status = impl_->CreateTableFile(table_file); uint64_t cnt = 0; - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_EQ(table_file.row_count_, cnt); table_file.row_count_ = 99999; - milvus::engine::meta::TableFilesSchema table_files = {table_file}; + milvus::engine::meta::SegmentsSchema table_files = {table_file}; status = impl_->UpdateTableFilesRowCount(table_files); ASSERT_TRUE(status.ok()); cnt = 0; - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_EQ(table_file.row_count_, cnt); std::vector ids = {table_file.id_}; - milvus::engine::meta::TableFilesSchema schemas; - status = impl_->GetTableFiles(table_id, ids, schemas); + milvus::engine::meta::SegmentsSchema schemas; + status = impl_->GetTableFiles(collection_id, ids, schemas); ASSERT_EQ(schemas.size(), 1UL); ASSERT_EQ(table_file.row_count_, schemas[0].row_count_); ASSERT_EQ(table_file.file_id_, schemas[0].file_id_); ASSERT_EQ(table_file.file_type_, schemas[0].file_type_); ASSERT_EQ(table_file.segment_id_, schemas[0].segment_id_); - ASSERT_EQ(table_file.table_id_, schemas[0].table_id_); + ASSERT_EQ(table_file.collection_id_, schemas[0].collection_id_); ASSERT_EQ(table_file.engine_type_, schemas[0].engine_type_); ASSERT_EQ(table_file.dimension_, schemas[0].dimension_); ASSERT_EQ(table_file.flush_lsn_, schemas[0].flush_lsn_); @@ -434,15 +434,15 @@ TEST_F(MetaTest, ARCHIVE_TEST_DAYS) { options.archive_conf_ = milvus::engine::ArchiveConf("delete", ss.str()); milvus::engine::meta::SqliteMetaImpl impl(options); - auto table_id = "meta_test_table"; + auto collection_id = "meta_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl.CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl.CreateTable(collection); - milvus::engine::meta::TableFilesSchema files; - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentsSchema files; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; auto cnt = 100; int64_t ts = milvus::engine::utils::GetMicroSecTimeStamp(); @@ -450,7 +450,7 @@ TEST_F(MetaTest, ARCHIVE_TEST_DAYS) { 
std::vector ids; for (auto i = 0; i < cnt; ++i) { status = impl.CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; int day = rand_r(&seed) % (days_num * 2); table_file.created_on_ = ts - day * milvus::engine::meta::DAY * milvus::engine::meta::US_PS - 10000; status = impl.UpdateTableFile(table_file); @@ -470,13 +470,13 @@ TEST_F(MetaTest, ARCHIVE_TEST_DAYS) { impl.Archive(); int i = 0; - milvus::engine::meta::TableFilesSchema files_get; - status = impl.GetTableFiles(table_file.table_id_, ids, files_get); + milvus::engine::meta::SegmentsSchema files_get; + status = impl.GetTableFiles(table_file.collection_id_, ids, files_get); ASSERT_TRUE(status.ok()); for (auto& file : files_get) { if (days[i] < days_num) { - ASSERT_EQ(file.file_type_, milvus::engine::meta::TableFileSchema::NEW); + ASSERT_EQ(file.file_type_, milvus::engine::meta::SegmentSchema::NEW); } i++; } @@ -490,22 +490,22 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) { options.archive_conf_ = milvus::engine::ArchiveConf("delete", "disk:11"); milvus::engine::meta::SqliteMetaImpl impl(options); - auto table_id = "meta_test_group"; + auto collection_id = "meta_test_group"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl.CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl.CreateTable(collection); - milvus::engine::meta::TableFilesSchema files; - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentsSchema files; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; auto cnt = 10; auto each_size = 2UL; std::vector ids; for (auto i = 0; i < cnt; ++i) { status = impl.CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; table_file.file_size_ = each_size * milvus::engine::G; status = impl.UpdateTableFile(table_file); files.push_back(table_file); @@ -526,13 +526,13 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) { impl.Archive(); int i = 0; - milvus::engine::meta::TableFilesSchema files_get; - status = impl.GetTableFiles(table_file.table_id_, ids, files_get); + milvus::engine::meta::SegmentsSchema files_get; + status = impl.GetTableFiles(table_file.collection_id_, ids, files_get); ASSERT_TRUE(status.ok()); for (auto& file : files_get) { if (i >= 5) { - ASSERT_EQ(file.file_type_, milvus::engine::meta::TableFileSchema::NEW); + ASSERT_EQ(file.file_type_, milvus::engine::meta::SegmentSchema::NEW); } ++i; } @@ -541,11 +541,11 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) { } TEST_F(MetaTest, TABLE_FILES_TEST) { - auto table_id = "meta_test_group"; + auto collection_id = "meta_test_group"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl_->CreateTable(collection); uint64_t new_merge_files_cnt = 1; uint64_t new_index_files_cnt = 2; @@ -555,73 +555,73 @@ TEST_F(MetaTest, TABLE_FILES_TEST) { uint64_t to_index_files_cnt = 6; uint64_t index_files_cnt = 7; - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentSchema table_file; + 
table_file.collection_id_ = collection.collection_id_; for (auto i = 0; i < new_merge_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW_MERGE; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_MERGE; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < new_index_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW_INDEX; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_INDEX; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < backup_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::BACKUP; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::BACKUP; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < new_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < raw_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::RAW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < to_index_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::TO_INDEX; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_INDEX; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < index_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::INDEX; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::INDEX; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } uint64_t total_row_count = 0; - status = impl_->Count(table_id, total_row_count); + status = impl_->Count(collection_id, total_row_count); ASSERT_TRUE(status.ok()); ASSERT_EQ(total_row_count, raw_files_cnt + to_index_files_cnt + index_files_cnt); - milvus::engine::meta::TableFilesSchema files; + milvus::engine::meta::SegmentsSchema files; status = impl_->FilesToIndex(files); ASSERT_EQ(files.size(), to_index_files_cnt); - milvus::engine::meta::TableFilesSchema table_files; - status = impl_->FilesToMerge(table.table_id_, table_files); + milvus::engine::meta::SegmentsSchema table_files; + status = impl_->FilesToMerge(collection.collection_id_, table_files); ASSERT_EQ(table_files.size(), raw_files_cnt); status = impl_->FilesToIndex(files); ASSERT_EQ(files.size(), to_index_files_cnt); table_files.clear(); - status = impl_->FilesToSearch(table_id, table_files); + status = impl_->FilesToSearch(collection_id, table_files); ASSERT_EQ(table_files.size(), to_index_files_cnt + raw_files_cnt + index_files_cnt); std::vector ids; @@ -640,104 +640,104 @@ TEST_F(MetaTest, TABLE_FILES_TEST) { table_files.clear(); std::vector file_types; - status = impl_->FilesByType(table.table_id_, file_types, table_files); + status = impl_->FilesByType(collection.collection_id_, file_types, table_files); ASSERT_TRUE(table_files.empty()); ASSERT_FALSE(status.ok()); file_types = { - milvus::engine::meta::TableFileSchema::NEW, 
milvus::engine::meta::TableFileSchema::NEW_MERGE, - milvus::engine::meta::TableFileSchema::NEW_INDEX, milvus::engine::meta::TableFileSchema::TO_INDEX, - milvus::engine::meta::TableFileSchema::INDEX, milvus::engine::meta::TableFileSchema::RAW, - milvus::engine::meta::TableFileSchema::BACKUP, + milvus::engine::meta::SegmentSchema::NEW, milvus::engine::meta::SegmentSchema::NEW_MERGE, + milvus::engine::meta::SegmentSchema::NEW_INDEX, milvus::engine::meta::SegmentSchema::TO_INDEX, + milvus::engine::meta::SegmentSchema::INDEX, milvus::engine::meta::SegmentSchema::RAW, + milvus::engine::meta::SegmentSchema::BACKUP, }; - status = impl_->FilesByType(table.table_id_, file_types, table_files); + status = impl_->FilesByType(collection.collection_id_, file_types, table_files); ASSERT_TRUE(status.ok()); uint64_t total_cnt = new_index_files_cnt + new_merge_files_cnt + backup_files_cnt + new_files_cnt + raw_files_cnt + to_index_files_cnt + index_files_cnt; ASSERT_EQ(table_files.size(), total_cnt); - status = impl_->DeleteTableFiles(table_id); + status = impl_->DeleteTableFiles(collection_id); ASSERT_TRUE(status.ok()); status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; status = impl_->UpdateTableFile(table_file); status = impl_->CleanUpShadowFiles(); ASSERT_TRUE(status.ok()); - table_file.table_id_ = table.table_id_; - table_file.file_type_ = milvus::engine::meta::TableFileSchema::TO_DELETE; + table_file.collection_id_ = collection.collection_id_; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_DELETE; status = impl_->CreateTableFile(table_file); std::vector files_to_delete; - milvus::engine::meta::TableFilesSchema files_schema; - files_to_delete.push_back(milvus::engine::meta::TableFileSchema::TO_DELETE); - status = impl_->FilesByType(table_id, files_to_delete, files_schema); + milvus::engine::meta::SegmentsSchema files_schema; + files_to_delete.push_back(milvus::engine::meta::SegmentSchema::TO_DELETE); + status = impl_->FilesByType(collection_id, files_to_delete, files_schema); ASSERT_TRUE(status.ok()); - table_file.table_id_ = table_id; - table_file.file_type_ = milvus::engine::meta::TableFileSchema::TO_DELETE; + table_file.collection_id_ = collection_id; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_DELETE; table_file.file_id_ = files_schema.front().file_id_; milvus::engine::OngoingFileChecker::GetInstance().MarkOngoingFile(table_file); status = impl_->CleanUpFilesWithTTL(1UL); ASSERT_TRUE(status.ok()); - status = impl_->DropTable(table_id); + status = impl_->DropTable(collection_id); ASSERT_TRUE(status.ok()); } TEST_F(MetaTest, INDEX_TEST) { - auto table_id = "index_test"; + auto collection_id = "index_test"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl_->CreateTable(collection); milvus::engine::TableIndex index; index.metric_type_ = 2; index.extra_params_ = {{"nlist", 1234}}; index.engine_type_ = 3; - status = impl_->UpdateTableIndex(table_id, index); + status = impl_->UpdateTableIndex(collection_id, index); ASSERT_TRUE(status.ok()); int64_t flag = 65536; - status = impl_->UpdateTableFlag(table_id, flag); + status = impl_->UpdateTableFlag(collection_id, flag); ASSERT_TRUE(status.ok()); - milvus::engine::meta::TableSchema table_info; - 
table_info.table_id_ = table_id; + milvus::engine::meta::CollectionSchema table_info; + table_info.collection_id_ = collection_id; status = impl_->DescribeTable(table_info); ASSERT_EQ(table_info.flag_, flag); milvus::engine::TableIndex index_out; - status = impl_->DescribeTableIndex(table_id, index_out); + status = impl_->DescribeTableIndex(collection_id, index_out); ASSERT_EQ(index_out.metric_type_, index.metric_type_); ASSERT_EQ(index_out.extra_params_, index.extra_params_); ASSERT_EQ(index_out.engine_type_, index.engine_type_); - status = impl_->DropTableIndex(table_id); + status = impl_->DropTableIndex(collection_id); ASSERT_TRUE(status.ok()); - status = impl_->DescribeTableIndex(table_id, index_out); + status = impl_->DescribeTableIndex(collection_id, index_out); ASSERT_EQ(index_out.metric_type_, index.metric_type_); ASSERT_NE(index_out.engine_type_, index.engine_type_); - status = impl_->UpdateTableFilesToIndex(table_id); + status = impl_->UpdateTableFilesToIndex(collection_id); ASSERT_TRUE(status.ok()); } TEST_F(MetaTest, LSN_TEST) { - auto table_id = "lsn_test"; + auto collection_id = "lsn_test"; uint64_t lsn = 42949672960; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl_->CreateTable(collection); - status = impl_->UpdateTableFlushLSN(table_id, lsn); + status = impl_->UpdateTableFlushLSN(collection_id, lsn); ASSERT_TRUE(status.ok()); uint64_t temp_lsb = 0; - status = impl_->GetTableFlushLSN(table_id, temp_lsb); + status = impl_->GetTableFlushLSN(collection_id, temp_lsb); ASSERT_EQ(temp_lsb, lsn); status = impl_->SetGlobalLastLSN(lsn); diff --git a/core/unittest/db/test_meta_mysql.cpp b/core/unittest/db/test_meta_mysql.cpp index f8e769edf6..0cd3edba77 100644 --- a/core/unittest/db/test_meta_mysql.cpp +++ b/core/unittest/db/test_meta_mysql.cpp @@ -26,86 +26,86 @@ #include const char* FAILED_CONNECT_SQL_SERVER = "Failed to connect to meta server(mysql)"; -const char* TABLE_ALREADY_EXISTS = "Table already exists and it is in delete state, please wait a second"; +const char* TABLE_ALREADY_EXISTS = "Collection already exists and it is in delete state, please wait a second"; TEST_F(MySqlMetaTest, TABLE_TEST) { - auto table_id = "meta_test_table"; + auto collection_id = "meta_test_table"; fiu_init(0); - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl_->CreateTable(collection); ASSERT_TRUE(status.ok()); - auto gid = table.id_; - table.id_ = -1; - status = impl_->DescribeTable(table); + auto gid = collection.id_; + collection.id_ = -1; + status = impl_->DescribeTable(collection); ASSERT_TRUE(status.ok()); - ASSERT_EQ(table.id_, gid); - ASSERT_EQ(table.table_id_, table_id); + ASSERT_EQ(collection.id_, gid); + ASSERT_EQ(collection.collection_id_, collection_id); - table.table_id_ = "not_found"; - status = impl_->DescribeTable(table); + collection.collection_id_ = "not_found"; + status = impl_->DescribeTable(collection); ASSERT_TRUE(!status.ok()); - table.table_id_ = table_id; - status = impl_->CreateTable(table); + collection.collection_id_ = collection_id; + status = impl_->CreateTable(collection); ASSERT_EQ(status.code(), milvus::DB_ALREADY_EXIST); - table.table_id_ = ""; - status = impl_->CreateTable(table); + 
collection.collection_id_ = ""; + status = impl_->CreateTable(collection); // ASSERT_TRUE(status.ok()); - table.table_id_ = table_id; + collection.collection_id_ = collection_id; FIU_ENABLE_FIU("MySQLMetaImpl.CreateTable.null_connection"); - auto stat = impl_->CreateTable(table); + auto stat = impl_->CreateTable(collection); ASSERT_FALSE(stat.ok()); ASSERT_EQ(stat.message(), FAILED_CONNECT_SQL_SERVER); fiu_disable("MySQLMetaImpl.CreateTable.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.CreateTable.throw_exception"); - stat = impl_->CreateTable(table); + stat = impl_->CreateTable(collection); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.CreateTable.throw_exception"); - //ensure table exists - stat = impl_->CreateTable(table); + //ensure collection exists + stat = impl_->CreateTable(collection); FIU_ENABLE_FIU("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE"); - stat = impl_->CreateTable(table); + stat = impl_->CreateTable(collection); ASSERT_FALSE(stat.ok()); ASSERT_EQ(stat.message(), TABLE_ALREADY_EXISTS); fiu_disable("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE"); FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.null_connection"); - stat = impl_->DescribeTable(table); + stat = impl_->DescribeTable(collection); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.DescribeTable.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception"); - stat = impl_->DescribeTable(table); + stat = impl_->DescribeTable(collection); ASSERT_FALSE(stat.ok()); fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception"); bool has_table = false; - stat = impl_->HasTable(table_id, has_table); + stat = impl_->HasTable(collection_id, has_table); ASSERT_TRUE(stat.ok()); ASSERT_TRUE(has_table); has_table = false; FIU_ENABLE_FIU("MySQLMetaImpl.HasTable.null_connection"); - stat = impl_->HasTable(table_id, has_table); + stat = impl_->HasTable(collection_id, has_table); ASSERT_FALSE(stat.ok()); ASSERT_FALSE(has_table); fiu_disable("MySQLMetaImpl.HasTable.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.HasTable.throw_exception"); - stat = impl_->HasTable(table_id, has_table); + stat = impl_->HasTable(collection_id, has_table); ASSERT_FALSE(stat.ok()); ASSERT_FALSE(has_table); fiu_disable("MySQLMetaImpl.HasTable.throw_exception"); FIU_ENABLE_FIU("MySQLMetaImpl.DropTable.CLUSTER_WRITABLE_MODE"); - stat = impl_->DropTable(table_id); + stat = impl_->DropTable(collection_id); fiu_disable("MySQLMetaImpl.DropTable.CLUSTER_WRITABLE_MODE"); FIU_ENABLE_FIU("MySQLMetaImpl.DropAll.null_connection"); @@ -123,7 +123,7 @@ TEST_F(MySqlMetaTest, TABLE_TEST) { } TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { - auto table_id = "meta_test_table"; + auto collection_id = "meta_test_table"; fiu_init(0); uint64_t size = 0; @@ -131,17 +131,17 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { ASSERT_TRUE(status.ok()); ASSERT_EQ(size, 0); - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - table.dimension_ = 256; - status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + collection.dimension_ = 256; + status = impl_->CreateTable(collection); //CreateTableFile - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; status = impl_->CreateTableFile(table_file); ASSERT_TRUE(status.ok()); - ASSERT_EQ(table_file.file_type_, milvus::engine::meta::TableFileSchema::NEW); + 
ASSERT_EQ(table_file.file_type_, milvus::engine::meta::SegmentSchema::NEW); FIU_ENABLE_FIU("MySQLMetaImpl.CreateTableFiles.null_connection"); status = impl_->CreateTableFile(table_file); @@ -160,27 +160,27 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { //Count uint64_t cnt = 0; - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); // ASSERT_TRUE(status.ok()); // ASSERT_EQ(cnt, 0UL); FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception"); - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception"); FIU_ENABLE_FIU("MySQLMetaImpl.Count.null_connection"); - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.Count.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.Count.throw_exception"); - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.Count.throw_exception"); auto file_id = table_file.file_id_; - auto new_file_type = milvus::engine::meta::TableFileSchema::INDEX; + auto new_file_type = milvus::engine::meta::SegmentSchema::INDEX; table_file.file_type_ = new_file_type; //UpdateTableFile @@ -199,7 +199,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { ASSERT_EQ(table_file.file_type_, new_file_type); auto no_table_file = table_file; - no_table_file.table_id_ = "notexist"; + no_table_file.collection_id_ = "notexist"; status = impl_->UpdateTableFile(no_table_file); ASSERT_TRUE(status.ok()); @@ -216,7 +216,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { status = impl_->CleanUpShadowFiles(); ASSERT_TRUE(status.ok()); - milvus::engine::meta::TableFilesSchema files_schema; + milvus::engine::meta::SegmentsSchema files_schema; FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFiles.null_connection"); status = impl_->UpdateTableFiles(files_schema); ASSERT_FALSE(status.ok()); @@ -231,89 +231,89 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { ASSERT_TRUE(status.ok()); std::vector ids = {table_file.id_}; - milvus::engine::meta::TableFilesSchema files; - status = impl_->GetTableFiles(table_file.table_id_, ids, files); + milvus::engine::meta::SegmentsSchema files; + status = impl_->GetTableFiles(table_file.collection_id_, ids, files); ASSERT_EQ(files.size(), 0UL); FIU_ENABLE_FIU("MySQLMetaImpl.GetTableFiles.null_connection"); - status = impl_->GetTableFiles(table_file.table_id_, ids, files); + status = impl_->GetTableFiles(table_file.collection_id_, ids, files); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.GetTableFiles.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.GetTableFiles.throw_exception"); - status = impl_->GetTableFiles(table_file.table_id_, ids, files); + status = impl_->GetTableFiles(table_file.collection_id_, ids, files); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.GetTableFiles.throw_exception"); ids.clear(); - status = impl_->GetTableFiles(table_file.table_id_, ids, files); + status = impl_->GetTableFiles(table_file.collection_id_, ids, files); ASSERT_TRUE(status.ok()); - table_file.table_id_ = table.table_id_; - table_file.file_type_ = milvus::engine::meta::TableFileSchema::RAW; + table_file.collection_id_ = collection.collection_id_; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW; status = impl_->CreateTableFile(table_file); ids = {table_file.id_}; status = impl_->FilesByID(ids, files); ASSERT_EQ(files.size(), 1UL); - table_file.table_id_ = table.table_id_; - 
table_file.file_type_ = milvus::engine::meta::TableFileSchema::TO_DELETE; + table_file.collection_id_ = collection.collection_id_; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_DELETE; status = impl_->CreateTableFile(table_file); std::vector files_to_delete; - files_to_delete.push_back(milvus::engine::meta::TableFileSchema::TO_DELETE); - status = impl_->FilesByType(table_id, files_to_delete, files_schema); + files_to_delete.push_back(milvus::engine::meta::SegmentSchema::TO_DELETE); + status = impl_->FilesByType(collection_id, files_to_delete, files_schema); ASSERT_TRUE(status.ok()); - table_file.table_id_ = table_id; - table_file.file_type_ = milvus::engine::meta::TableFileSchema::TO_DELETE; + table_file.collection_id_ = collection_id; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_DELETE; table_file.file_id_ = files_schema.front().file_id_; milvus::engine::OngoingFileChecker::GetInstance().MarkOngoingFile(table_file); status = impl_->CleanUpFilesWithTTL(1UL); ASSERT_TRUE(status.ok()); - status = impl_->DropTable(table_file.table_id_); + status = impl_->DropTable(table_file.collection_id_); ASSERT_TRUE(status.ok()); status = impl_->UpdateTableFile(table_file); ASSERT_TRUE(status.ok()); } TEST_F(MySqlMetaTest, TABLE_FILE_ROW_COUNT_TEST) { - auto table_id = "row_count_test_table"; + auto collection_id = "row_count_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - table.dimension_ = 256; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + collection.dimension_ = 256; + auto status = impl_->CreateTable(collection); - milvus::engine::meta::TableFileSchema table_file; + milvus::engine::meta::SegmentSchema table_file; table_file.row_count_ = 100; - table_file.table_id_ = table.table_id_; + table_file.collection_id_ = collection.collection_id_; table_file.file_type_ = 1; status = impl_->CreateTableFile(table_file); uint64_t cnt = 0; - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_EQ(table_file.row_count_, cnt); table_file.row_count_ = 99999; - milvus::engine::meta::TableFilesSchema table_files = {table_file}; + milvus::engine::meta::SegmentsSchema table_files = {table_file}; status = impl_->UpdateTableFilesRowCount(table_files); ASSERT_TRUE(status.ok()); cnt = 0; - status = impl_->Count(table_id, cnt); + status = impl_->Count(collection_id, cnt); ASSERT_EQ(table_file.row_count_, cnt); std::vector ids = {table_file.id_}; - milvus::engine::meta::TableFilesSchema schemas; - status = impl_->GetTableFiles(table_id, ids, schemas); + milvus::engine::meta::SegmentsSchema schemas; + status = impl_->GetTableFiles(collection_id, ids, schemas); ASSERT_EQ(schemas.size(), 1UL); ASSERT_EQ(table_file.row_count_, schemas[0].row_count_); ASSERT_EQ(table_file.file_id_, schemas[0].file_id_); ASSERT_EQ(table_file.file_type_, schemas[0].file_type_); ASSERT_EQ(table_file.segment_id_, schemas[0].segment_id_); - ASSERT_EQ(table_file.table_id_, schemas[0].table_id_); + ASSERT_EQ(table_file.collection_id_, schemas[0].collection_id_); ASSERT_EQ(table_file.engine_type_, schemas[0].engine_type_); ASSERT_EQ(table_file.dimension_, schemas[0].dimension_); ASSERT_EQ(table_file.flush_lsn_, schemas[0].flush_lsn_); @@ -333,15 +333,15 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) { int mode = milvus::engine::DBOptions::MODE::SINGLE; milvus::engine::meta::MySQLMetaImpl impl(options, mode); - auto table_id = "meta_test_table"; + 
auto collection_id = "meta_test_table"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl.CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl.CreateTable(collection); - milvus::engine::meta::TableFilesSchema files; - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentsSchema files; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; auto cnt = 100; int64_t ts = milvus::engine::utils::GetMicroSecTimeStamp(); @@ -349,7 +349,7 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) { std::vector ids; for (auto i = 0; i < cnt; ++i) { status = impl.CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; int day = rand_r(&seed) % (days_num * 2); table_file.created_on_ = ts - day * milvus::engine::meta::DAY * milvus::engine::meta::US_PS - 10000; status = impl.UpdateTableFile(table_file); @@ -371,47 +371,47 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) { impl.Archive(); int i = 0; - milvus::engine::meta::TableFilesSchema files_get; - status = impl.GetTableFiles(table_file.table_id_, ids, files_get); + milvus::engine::meta::SegmentsSchema files_get; + status = impl.GetTableFiles(table_file.collection_id_, ids, files_get); ASSERT_TRUE(status.ok()); for (auto& file : files_get) { if (days[i] < days_num) { - ASSERT_EQ(file.file_type_, milvus::engine::meta::TableFileSchema::NEW); + ASSERT_EQ(file.file_type_, milvus::engine::meta::SegmentSchema::NEW); } i++; } std::vector file_types = { - (int)milvus::engine::meta::TableFileSchema::NEW, + (int)milvus::engine::meta::SegmentSchema::NEW, }; - milvus::engine::meta::TableFilesSchema table_files; - status = impl.FilesByType(table_id, file_types, table_files); + milvus::engine::meta::SegmentsSchema table_files; + status = impl.FilesByType(collection_id, file_types, table_files); ASSERT_FALSE(table_files.empty()); FIU_ENABLE_FIU("MySQLMetaImpl.FilesByType.null_connection"); table_files.clear(); - status = impl.FilesByType(table_id, file_types, table_files); + status = impl.FilesByType(collection_id, file_types, table_files); ASSERT_FALSE(status.ok()); ASSERT_TRUE(table_files.empty()); fiu_disable("MySQLMetaImpl.FilesByType.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.FilesByType.throw_exception"); - status = impl.FilesByType(table_id, file_types, table_files); + status = impl.FilesByType(collection_id, file_types, table_files); ASSERT_FALSE(status.ok()); ASSERT_TRUE(table_files.empty()); fiu_disable("MySQLMetaImpl.FilesByType.throw_exception"); - status = impl.UpdateTableFilesToIndex(table_id); + status = impl.UpdateTableFilesToIndex(collection_id); ASSERT_TRUE(status.ok()); FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFilesToIndex.null_connection"); - status = impl.UpdateTableFilesToIndex(table_id); + status = impl.UpdateTableFilesToIndex(collection_id); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.UpdateTableFilesToIndex.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFilesToIndex.throw_exception"); - status = impl.UpdateTableFilesToIndex(table_id); + status = impl.UpdateTableFilesToIndex(collection_id); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.UpdateTableFilesToIndex.throw_exception"); @@ -426,26 +426,26 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DISK) { options.archive_conf_ = 
milvus::engine::ArchiveConf("delete", "disk:11"); int mode = milvus::engine::DBOptions::MODE::SINGLE; milvus::engine::meta::MySQLMetaImpl impl(options, mode); - auto table_id = "meta_test_group"; + auto collection_id = "meta_test_group"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl.CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl.CreateTable(collection); - milvus::engine::meta::TableSchema table_schema; - table_schema.table_id_ = ""; + milvus::engine::meta::CollectionSchema table_schema; + table_schema.collection_id_ = ""; status = impl.CreateTable(table_schema); - milvus::engine::meta::TableFilesSchema files; - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentsSchema files; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; auto cnt = 10; auto each_size = 2UL; std::vector ids; for (auto i = 0; i < cnt; ++i) { status = impl.CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; table_file.file_size_ = each_size * milvus::engine::G; status = impl.UpdateTableFile(table_file); files.push_back(table_file); @@ -463,13 +463,13 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DISK) { impl.Archive(); int i = 0; - milvus::engine::meta::TableFilesSchema files_get; - status = impl.GetTableFiles(table_file.table_id_, ids, files_get); + milvus::engine::meta::SegmentsSchema files_get; + status = impl.GetTableFiles(table_file.collection_id_, ids, files_get); ASSERT_TRUE(status.ok()); for (auto& file : files_get) { if (i >= 5) { - ASSERT_EQ(file.file_type_, milvus::engine::meta::TableFileSchema::NEW); + ASSERT_EQ(file.file_type_, milvus::engine::meta::SegmentSchema::NEW); } ++i; } @@ -480,9 +480,9 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DISK) { TEST_F(MySqlMetaTest, INVALID_INITILIZE_TEST) { fiu_init(0); - auto table_id = "meta_test_group"; - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; + auto collection_id = "meta_test_group"; + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; milvus::engine::DBMetaOptions meta = GetOptions().meta_; { FIU_ENABLE_FIU("MySQLMetaImpl.Initialize.fail_create_directory"); @@ -527,12 +527,12 @@ TEST_F(MySqlMetaTest, INVALID_INITILIZE_TEST) { } TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { - auto table_id = "meta_test_group"; + auto collection_id = "meta_test_group"; fiu_init(0); - milvus::engine::meta::TableSchema table; - table.table_id_ = table_id; - auto status = impl_->CreateTable(table); + milvus::engine::meta::CollectionSchema collection; + collection.collection_id_ = collection_id; + auto status = impl_->CreateTable(collection); uint64_t new_merge_files_cnt = 1; uint64_t new_index_files_cnt = 2; @@ -542,90 +542,90 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { uint64_t to_index_files_cnt = 6; uint64_t index_files_cnt = 7; - milvus::engine::meta::TableFileSchema table_file; - table_file.table_id_ = table.table_id_; + milvus::engine::meta::SegmentSchema table_file; + table_file.collection_id_ = collection.collection_id_; for (auto i = 0; i < new_merge_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW_MERGE; + table_file.file_type_ = 
milvus::engine::meta::SegmentSchema::NEW_MERGE; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < new_index_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW_INDEX; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_INDEX; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < backup_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::BACKUP; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::BACKUP; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < new_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::NEW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < raw_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::RAW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < to_index_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::TO_INDEX; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_INDEX; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } for (auto i = 0; i < index_files_cnt; ++i) { status = impl_->CreateTableFile(table_file); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::INDEX; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::INDEX; table_file.row_count_ = 1; status = impl_->UpdateTableFile(table_file); } uint64_t total_row_count = 0; - status = impl_->Count(table_id, total_row_count); + status = impl_->Count(collection_id, total_row_count); ASSERT_TRUE(status.ok()); ASSERT_EQ(total_row_count, raw_files_cnt + to_index_files_cnt + index_files_cnt); - milvus::engine::meta::TableFilesSchema files; + milvus::engine::meta::SegmentsSchema files; status = impl_->FilesToIndex(files); ASSERT_EQ(files.size(), to_index_files_cnt); - milvus::engine::meta::TableFilesSchema table_files; - status = impl_->FilesToMerge(table.table_id_, table_files); + milvus::engine::meta::SegmentsSchema table_files; + status = impl_->FilesToMerge(collection.collection_id_, table_files); ASSERT_EQ(table_files.size(), raw_files_cnt); FIU_ENABLE_FIU("MySQLMetaImpl.FilesToMerge.null_connection"); - status = impl_->FilesToMerge(table.table_id_, table_files); + status = impl_->FilesToMerge(collection.collection_id_, table_files); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.FilesToMerge.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.FilesToMerge.throw_exception"); - status = impl_->FilesToMerge(table.table_id_, table_files); + status = impl_->FilesToMerge(collection.collection_id_, table_files); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.FilesToMerge.throw_exception"); status = impl_->FilesToMerge("notexist", table_files); ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND); - table_file.file_type_ = milvus::engine::meta::TableFileSchema::RAW; + table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW; table_file.file_size_ = milvus::engine::ONE_GB + 1; status = impl_->UpdateTableFile(table_file); ASSERT_TRUE(status.ok()); #if 0 { //skip large files - 
milvus::engine::meta::TableFilesSchema table_files; - status = impl_->FilesToMerge(table.table_id_, table_files); + milvus::engine::meta::SegmentsSchema table_files; + status = impl_->FilesToMerge(collection.collection_id_, table_files); ASSERT_EQ(dated_files[table_file.date_].size(), raw_files_cnt); } #endif @@ -648,7 +648,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { fiu_disable("MySQLMetaImpl.FilesToIndex.throw_exception"); table_files.clear(); - status = impl_->FilesToSearch(table_id, table_files); + status = impl_->FilesToSearch(collection_id, table_files); ASSERT_EQ(table_files.size(), to_index_files_cnt + raw_files_cnt + index_files_cnt); table_files.clear(); @@ -657,12 +657,12 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { ASSERT_EQ(table_files.size(), 0); FIU_ENABLE_FIU("MySQLMetaImpl.FilesToSearch.null_connection"); - status = impl_->FilesToSearch(table_id, table_files); + status = impl_->FilesToSearch(collection_id, table_files); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.FilesToSearch.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.FilesToSearch.throw_exception"); - status = impl_->FilesToSearch(table_id, table_files); + status = impl_->FilesToSearch(collection_id, table_files); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.FilesToSearch.throw_exception"); @@ -671,36 +671,36 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { table_files.clear(); std::vector file_types; - status = impl_->FilesByType(table.table_id_, file_types, table_files); + status = impl_->FilesByType(collection.collection_id_, file_types, table_files); ASSERT_TRUE(table_files.empty()); ASSERT_FALSE(status.ok()); file_types = { - milvus::engine::meta::TableFileSchema::NEW, milvus::engine::meta::TableFileSchema::NEW_MERGE, - milvus::engine::meta::TableFileSchema::NEW_INDEX, milvus::engine::meta::TableFileSchema::TO_INDEX, - milvus::engine::meta::TableFileSchema::INDEX, milvus::engine::meta::TableFileSchema::RAW, - milvus::engine::meta::TableFileSchema::BACKUP, + milvus::engine::meta::SegmentSchema::NEW, milvus::engine::meta::SegmentSchema::NEW_MERGE, + milvus::engine::meta::SegmentSchema::NEW_INDEX, milvus::engine::meta::SegmentSchema::TO_INDEX, + milvus::engine::meta::SegmentSchema::INDEX, milvus::engine::meta::SegmentSchema::RAW, + milvus::engine::meta::SegmentSchema::BACKUP, }; - status = impl_->FilesByType(table.table_id_, file_types, table_files); + status = impl_->FilesByType(collection.collection_id_, file_types, table_files); ASSERT_TRUE(status.ok()); uint64_t total_cnt = new_index_files_cnt + new_merge_files_cnt + backup_files_cnt + new_files_cnt + raw_files_cnt + to_index_files_cnt + index_files_cnt; ASSERT_EQ(table_files.size(), total_cnt); FIU_ENABLE_FIU("MySQLMetaImpl.DeleteTableFiles.null_connection"); - status = impl_->DeleteTableFiles(table_id); + status = impl_->DeleteTableFiles(collection_id); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.DeleteTableFiles.null_connection"); FIU_ENABLE_FIU("MySQLMetaImpl.DeleteTableFiles.throw_exception"); - status = impl_->DeleteTableFiles(table_id); + status = impl_->DeleteTableFiles(collection_id); ASSERT_FALSE(status.ok()); fiu_disable("MySQLMetaImpl.DeleteTableFiles.throw_exception"); - status = impl_->DeleteTableFiles(table_id); + status = impl_->DeleteTableFiles(collection_id); ASSERT_TRUE(status.ok()); - status = impl_->DropTable(table_id); + status = impl_->DropTable(collection_id); ASSERT_TRUE(status.ok()); status = impl_->CleanUpFilesWithTTL(0UL); @@ -738,27 +738,27 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { } 
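// The hunks above and below repeat one libfiu fault-injection pattern:
// enable a named fail point, call the meta API, assert the error path,
// then disable the fail point again. A condensed, standalone sketch of
// that pattern follows. It is illustrative only: Status and
// CreateCollection() are hypothetical stand-ins, not the Milvus sources.
// It assumes libfiu's public control API (fiu_init, fiu_enable,
// fiu_disable, fiu_do_on); the FIU_ENABLE_FIU macro used in these tests
// is a thin wrapper over fiu_enable. Build with -DFIU_ENABLE so
// fiu-local.h maps to the real implementation rather than no-ops.
#include <fiu-local.h>    // fiu_init, fiu_do_on
#include <fiu-control.h>  // fiu_enable, fiu_disable
#include <cassert>
#include <string>

struct Status {            // stand-in for milvus::Status
    int code = 0;          // 0 == OK
    bool ok() const { return code == 0; }
};

// A meta operation guarded by a named fail point, mirroring fail points
// such as "MySQLMetaImpl.UpdateTableIndex.null_connection" in the tests.
Status CreateCollection(const std::string& collection_id) {
    fiu_do_on("Meta.CreateCollection.fail", return Status{1});  // injected failure
    return Status{};                                            // normal path
}

int main() {
    fiu_init(0);                            // initialize libfiu once

    assert(CreateCollection("demo").ok());  // fail point off: success

    // Bracket the call with enable/disable, exactly as the tests bracket
    // calls with FIU_ENABLE_FIU(...) / fiu_disable(...).
    fiu_enable("Meta.CreateCollection.fail", 1, nullptr, 0);
    assert(!CreateCollection("demo").ok()); // fail point on: error path
    fiu_disable("Meta.CreateCollection.fail");

    assert(CreateCollection("demo").ok());  // normal behavior restored
    return 0;
}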
TEST_F(MySqlMetaTest, INDEX_TEST) {
-    auto table_id = "index_test";
+    auto collection_id = "index_test";
     fiu_init(0);

-    milvus::engine::meta::TableSchema table;
-    table.table_id_ = table_id;
-    auto status = impl_->CreateTable(table);
+    milvus::engine::meta::CollectionSchema collection;
+    collection.collection_id_ = collection_id;
+    auto status = impl_->CreateTable(collection);

     milvus::engine::TableIndex index;
     index.metric_type_ = 2;
     index.extra_params_ = {{"nlist", 1234}};
     index.engine_type_ = 3;
-    status = impl_->UpdateTableIndex(table_id, index);
+    status = impl_->UpdateTableIndex(collection_id, index);
     ASSERT_TRUE(status.ok());

     FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableIndex.null_connection");
-    status = impl_->UpdateTableIndex(table_id, index);
+    status = impl_->UpdateTableIndex(collection_id, index);
     ASSERT_FALSE(status.ok());
     fiu_disable("MySQLMetaImpl.UpdateTableIndex.null_connection");

     FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableIndex.throw_exception");
-    status = impl_->UpdateTableIndex(table_id, index);
+    status = impl_->UpdateTableIndex(collection_id, index);
     ASSERT_FALSE(status.ok());
     fiu_disable("MySQLMetaImpl.UpdateTableIndex.throw_exception");

@@ -766,50 +766,50 @@ TEST_F(MySqlMetaTest, INDEX_TEST) {
     ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND);

     int64_t flag = 65536;
-    status = impl_->UpdateTableFlag(table_id, flag);
+    status = impl_->UpdateTableFlag(collection_id, flag);
     ASSERT_TRUE(status.ok());

     FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFlag.null_connection");
-    status = impl_->UpdateTableFlag(table_id, flag);
+    status = impl_->UpdateTableFlag(collection_id, flag);
     ASSERT_FALSE(status.ok());
     fiu_disable("MySQLMetaImpl.UpdateTableFlag.null_connection");

     FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFlag.throw_exception");
-    status = impl_->UpdateTableFlag(table_id, flag);
+    status = impl_->UpdateTableFlag(collection_id, flag);
     ASSERT_FALSE(status.ok());
     fiu_disable("MySQLMetaImpl.UpdateTableFlag.throw_exception");

-    milvus::engine::meta::TableSchema table_info;
-    table_info.table_id_ = table_id;
+    milvus::engine::meta::CollectionSchema table_info;
+    table_info.collection_id_ = collection_id;
     status = impl_->DescribeTable(table_info);
     ASSERT_EQ(table_info.flag_, flag);

     milvus::engine::TableIndex index_out;
-    status = impl_->DescribeTableIndex(table_id, index_out);
+    status = impl_->DescribeTableIndex(collection_id, index_out);
     ASSERT_EQ(index_out.metric_type_, index.metric_type_);
     ASSERT_EQ(index_out.extra_params_, index.extra_params_);
     ASSERT_EQ(index_out.engine_type_, index.engine_type_);

-    status = impl_->DropTableIndex(table_id);
+    status = impl_->DropTableIndex(collection_id);
     ASSERT_TRUE(status.ok());

-    status = impl_->DescribeTableIndex(table_id, index_out);
+    status = impl_->DescribeTableIndex(collection_id, index_out);
     ASSERT_EQ(index_out.metric_type_, index.metric_type_);
     ASSERT_NE(index_out.engine_type_, index.engine_type_);

     FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTableIndex.null_connection");
-    status = impl_->DescribeTableIndex(table_id, index_out);
+    status = impl_->DescribeTableIndex(collection_id, index_out);
     ASSERT_FALSE(status.ok());
     fiu_disable("MySQLMetaImpl.DescribeTableIndex.null_connection");

     FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTableIndex.throw_exception");
-    status = impl_->DescribeTableIndex(table_id, index_out);
+    status = impl_->DescribeTableIndex(collection_id, index_out);
     ASSERT_FALSE(status.ok());
     fiu_disable("MySQLMetaImpl.DescribeTableIndex.throw_exception");

     status = impl_->DescribeTableIndex("notexist", index_out);
     ASSERT_EQ(status.code(),
milvus::DB_NOT_FOUND); - status = impl_->UpdateTableFilesToIndex(table_id); + status = impl_->UpdateTableFilesToIndex(collection_id); ASSERT_TRUE(status.ok()); } diff --git a/core/unittest/db/test_misc.cpp b/core/unittest/db/test_misc.cpp index 4a0ab67315..02090eeb46 100644 --- a/core/unittest/db/test_misc.cpp +++ b/core/unittest/db/test_misc.cpp @@ -119,9 +119,9 @@ TEST(DBMiscTest, UTILS_TEST) { // status = engine::utils::CreateTablePath(options, TABLE_NAME); // ASSERT_FALSE(status.ok()); - milvus::engine::meta::TableFileSchema file; + milvus::engine::meta::SegmentSchema file; file.id_ = 50; - file.table_id_ = TABLE_NAME; + file.collection_id_ = TABLE_NAME; file.file_type_ = 3; file.date_ = 155000; status = milvus::engine::utils::GetTableFilePath(options, file); @@ -181,11 +181,11 @@ TEST(DBMiscTest, SAFE_ID_GENERATOR_TEST) { TEST(DBMiscTest, CHECKER_TEST) { { milvus::engine::IndexFailedChecker checker; - milvus::engine::meta::TableFileSchema schema; - schema.table_id_ = "aaa"; + milvus::engine::meta::SegmentSchema schema; + schema.collection_id_ = "aaa"; schema.file_id_ = "5000"; checker.MarkFailedIndexFile(schema, "5000 fail"); - schema.table_id_ = "bbb"; + schema.collection_id_ = "bbb"; schema.file_id_ = "5001"; checker.MarkFailedIndexFile(schema, "5001 fail"); @@ -193,12 +193,12 @@ TEST(DBMiscTest, CHECKER_TEST) { checker.GetErrMsgForTable("aaa", err_msg); ASSERT_EQ(err_msg, "5000 fail"); - schema.table_id_ = "bbb"; + schema.collection_id_ = "bbb"; schema.file_id_ = "5002"; checker.MarkFailedIndexFile(schema, "5002 fail"); checker.MarkFailedIndexFile(schema, "5002 fail"); - milvus::engine::meta::TableFilesSchema table_files = {schema}; + milvus::engine::meta::SegmentsSchema table_files = {schema}; checker.IgnoreFailedIndexFiles(table_files); ASSERT_TRUE(table_files.empty()); @@ -212,16 +212,16 @@ TEST(DBMiscTest, CHECKER_TEST) { { milvus::engine::OngoingFileChecker& checker = milvus::engine::OngoingFileChecker::GetInstance(); - milvus::engine::meta::TableFileSchema schema; - schema.table_id_ = "aaa"; + milvus::engine::meta::SegmentSchema schema; + schema.collection_id_ = "aaa"; schema.file_id_ = "5000"; checker.MarkOngoingFile(schema); ASSERT_TRUE(checker.IsIgnored(schema)); - schema.table_id_ = "bbb"; + schema.collection_id_ = "bbb"; schema.file_id_ = "5001"; - milvus::engine::meta::TableFilesSchema table_files = {schema}; + milvus::engine::meta::SegmentsSchema table_files = {schema}; checker.MarkOngoingFiles(table_files); ASSERT_TRUE(checker.IsIgnored(schema)); @@ -229,7 +229,7 @@ TEST(DBMiscTest, CHECKER_TEST) { checker.UnmarkOngoingFile(schema); ASSERT_FALSE(checker.IsIgnored(schema)); - schema.table_id_ = "aaa"; + schema.collection_id_ = "aaa"; schema.file_id_ = "5000"; checker.UnmarkOngoingFile(schema); ASSERT_FALSE(checker.IsIgnored(schema)); diff --git a/core/unittest/db/test_search_by_id.cpp b/core/unittest/db/test_search_by_id.cpp index 568626296b..7e4b92a64e 100644 --- a/core/unittest/db/test_search_by_id.cpp +++ b/core/unittest/db/test_search_by_id.cpp @@ -37,15 +37,15 @@ std::string GetTableName() { auto now = std::chrono::system_clock::now(); auto micros = std::chrono::duration_cast(now.time_since_epoch()).count(); - static std::string table_name = std::to_string(micros); - return table_name; + static std::string collection_name = std::to_string(micros); + return collection_name; } -milvus::engine::meta::TableSchema +milvus::engine::meta::CollectionSchema BuildTableSchema() { - milvus::engine::meta::TableSchema table_info; + milvus::engine::meta::CollectionSchema 
table_info; table_info.dimension_ = TABLE_DIM; - table_info.table_id_ = GetTableName(); + table_info.collection_id_ = GetTableName(); table_info.metric_type_ = (int32_t)milvus::engine::MetricType::L2; table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT; return table_info; @@ -64,11 +64,11 @@ BuildVectors(uint64_t n, milvus::engine::VectorsData& vectors) { } // namespace TEST_F(SearchByIdTest, basic) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -81,7 +81,7 @@ TEST_F(SearchByIdTest, basic) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -108,7 +108,7 @@ TEST_F(SearchByIdTest, basic) { milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, i, result_ids, + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, i, result_ids, result_distances); ASSERT_EQ(result_ids[0], i); ASSERT_LT(result_distances[0], 1e-4); @@ -116,11 +116,11 @@ TEST_F(SearchByIdTest, basic) { } TEST_F(SearchByIdTest, with_index) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -133,7 +133,7 @@ TEST_F(SearchByIdTest, with_index) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -154,7 +154,7 @@ TEST_F(SearchByIdTest, with_index) { milvus::engine::TableIndex index; index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8; index.extra_params_ = {{"nlist", 10}}; - stat = db_->CreateIndex(table_info.table_id_, index); + stat = db_->CreateIndex(table_info.collection_id_, index); ASSERT_TRUE(stat.ok()); const int topk = 10, nprobe = 10; @@ -166,7 +166,7 @@ TEST_F(SearchByIdTest, with_index) { milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, i, result_ids, + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, i, result_ids, result_distances); ASSERT_EQ(result_ids[0], i); ASSERT_LT(result_distances[0], 1e-3); @@ -174,11 +174,11 @@ TEST_F(SearchByIdTest, with_index) { } TEST_F(SearchByIdTest, with_delete) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = 
BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -191,7 +191,7 @@ TEST_F(SearchByIdTest, with_delete) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -213,7 +213,7 @@ TEST_F(SearchByIdTest, with_delete) { for (auto& id : ids_to_search) { ids_to_delete.emplace_back(id); } - stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete); + stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete); stat = db_->Flush(); ASSERT_TRUE(stat.ok()); @@ -227,7 +227,7 @@ TEST_F(SearchByIdTest, with_delete) { milvus::engine::ResultIds result_ids; milvus::engine::ResultDistances result_distances; - stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, i, result_ids, + stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, i, result_ids, result_distances); ASSERT_EQ(result_ids[0], -1); ASSERT_EQ(result_distances[0], std::numeric_limits::max()); @@ -235,11 +235,11 @@ TEST_F(SearchByIdTest, with_delete) { } TEST_F(GetVectorByIdTest, basic) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -252,7 +252,7 @@ TEST_F(GetVectorByIdTest, basic) { xb.id_array_.push_back(i); } - stat = db_->InsertVectors(table_info.table_id_, "", xb); + stat = db_->InsertVectors(table_info.collection_id_, "", xb); ASSERT_TRUE(stat.ok()); std::random_device rd; @@ -280,10 +280,10 @@ TEST_F(GetVectorByIdTest, basic) { milvus::engine::ResultDistances result_distances; milvus::engine::VectorsData vector; - stat = db_->GetVectorByID(table_info.table_id_, id, vector); + stat = db_->GetVectorByID(table_info.collection_id_, id, vector); ASSERT_TRUE(stat.ok()); - stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, vector, result_ids, + stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, vector, result_ids, result_distances); ASSERT_TRUE(stat.ok()); ASSERT_EQ(result_ids[0], id); @@ -292,11 +292,11 @@ TEST_F(GetVectorByIdTest, basic) { } TEST_F(GetVectorByIdTest, with_index) { - milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + milvus::engine::meta::CollectionSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); - milvus::engine::meta::TableSchema table_info_get; - table_info_get.table_id_ = table_info.table_id_; + milvus::engine::meta::CollectionSchema table_info_get; + table_info_get.collection_id_ = table_info.collection_id_; stat = db_->DescribeTable(table_info_get); ASSERT_TRUE(stat.ok()); ASSERT_EQ(table_info_get.dimension_, TABLE_DIM); @@ -309,7 +309,7 @@ TEST_F(GetVectorByIdTest, 
 with_index) {
         xb.id_array_.push_back(i);
     }
 
-    stat = db_->InsertVectors(table_info.table_id_, "", xb);
+    stat = db_->InsertVectors(table_info.collection_id_, "", xb);
     ASSERT_TRUE(stat.ok());
 
     std::random_device rd;
@@ -330,7 +330,7 @@ TEST_F(GetVectorByIdTest, with_index) {
     milvus::engine::TableIndex index;
     index.extra_params_ = {{"nlist", 10}};
     index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
-    stat = db_->CreateIndex(table_info.table_id_, index);
+    stat = db_->CreateIndex(table_info.collection_id_, index);
     ASSERT_TRUE(stat.ok());
 
     const int topk = 10, nprobe = 10;
@@ -343,10 +343,10 @@ TEST_F(GetVectorByIdTest, with_index) {
         milvus::engine::ResultDistances result_distances;
         milvus::engine::VectorsData vector;
 
-        stat = db_->GetVectorByID(table_info.table_id_, id, vector);
+        stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
         ASSERT_TRUE(stat.ok());
 
-        stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, vector, result_ids,
+        stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, vector, result_ids,
                           result_distances);
         ASSERT_EQ(result_ids[0], id);
         ASSERT_LT(result_distances[0], 1e-3);
@@ -354,11 +354,11 @@ TEST_F(GetVectorByIdTest, with_index) {
 }
 
 TEST_F(GetVectorByIdTest, with_delete) {
-    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
+    milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
     auto stat = db_->CreateTable(table_info);
 
-    milvus::engine::meta::TableSchema table_info_get;
-    table_info_get.table_id_ = table_info.table_id_;
+    milvus::engine::meta::CollectionSchema table_info_get;
+    table_info_get.collection_id_ = table_info.collection_id_;
     stat = db_->DescribeTable(table_info_get);
     ASSERT_TRUE(stat.ok());
     ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
@@ -371,7 +371,7 @@ TEST_F(GetVectorByIdTest, with_delete) {
         xb.id_array_.push_back(i);
     }
 
-    stat = db_->InsertVectors(table_info.table_id_, "", xb);
+    stat = db_->InsertVectors(table_info.collection_id_, "", xb);
     ASSERT_TRUE(stat.ok());
 
     std::random_device rd;
@@ -393,7 +393,7 @@ TEST_F(GetVectorByIdTest, with_delete) {
     for (auto& id : ids_to_search) {
         ids_to_delete.emplace_back(id);
     }
-    stat = db_->DeleteVectors(table_info.table_id_, ids_to_delete);
+    stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
 
     stat = db_->Flush();
     ASSERT_TRUE(stat.ok());
@@ -405,7 +405,7 @@ TEST_F(GetVectorByIdTest, with_delete) {
         milvus::engine::ResultDistances result_distances;
         milvus::engine::VectorsData vector;
 
-        stat = db_->GetVectorByID(table_info.table_id_, id, vector);
+        stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
         ASSERT_TRUE(stat.ok());
         ASSERT_TRUE(vector.float_data_.empty());
         ASSERT_EQ(vector.vector_count_, 0);
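One behavioral detail visible in the with_delete hunks: after DeleteVectors plus Flush, GetVectorByID still returns an OK status for a deleted id, and the miss is signaled through an empty payload rather than an error code. A condensed usage sketch (setup elided; names as in the tests):

    milvus::engine::VectorsData vector;
    auto stat = db_->GetVectorByID(collection_id, id, vector);  // ok() even for a deleted id
    if (stat.ok() && vector.vector_count_ == 0) {
        // id no longer exists: float_data_ (or binary_data_) comes back empty
    }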
@@ -413,16 +413,16 @@
 }
 
 TEST_F(SearchByIdTest, BINARY) {
-    milvus::engine::meta::TableSchema table_info;
+    milvus::engine::meta::CollectionSchema table_info;
     table_info.dimension_ = TABLE_DIM;
-    table_info.table_id_ = GetTableName();
+    table_info.collection_id_ = GetTableName();
     table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP;
     table_info.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD;
     auto stat = db_->CreateTable(table_info);
     ASSERT_TRUE(stat.ok());
 
-    milvus::engine::meta::TableSchema table_info_get;
-    table_info_get.table_id_ = table_info.table_id_;
+    milvus::engine::meta::CollectionSchema table_info_get;
+    table_info_get.collection_id_ = table_info.collection_id_;
     stat = db_->DescribeTable(table_info_get);
     ASSERT_TRUE(stat.ok());
     ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
@@ -448,7 +448,7 @@ TEST_F(SearchByIdTest, BINARY) {
             vectors.id_array_.emplace_back(k * nb + i);
         }
 
-        stat = db_->InsertVectors(table_info.table_id_, "", vectors);
+        stat = db_->InsertVectors(table_info.collection_id_, "", vectors);
         ASSERT_TRUE(stat.ok());
     }
 
@@ -468,7 +468,7 @@ TEST_F(SearchByIdTest, BINARY) {
     ASSERT_TRUE(stat.ok());
 
     uint64_t row_count;
-    stat = db_->GetTableRowCount(table_info.table_id_, row_count);
+    stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
     ASSERT_TRUE(stat.ok());
     ASSERT_EQ(row_count, nb * insert_loop);
 
@@ -482,11 +482,11 @@ TEST_F(SearchByIdTest, BINARY) {
         milvus::engine::ResultDistances result_distances;
         milvus::engine::VectorsData vector;
 
-        stat = db_->GetVectorByID(table_info.table_id_, id, vector);
+        stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
         ASSERT_TRUE(stat.ok());
         ASSERT_EQ(vector.vector_count_, 1);
 
-        stat = db_->Query(dummy_context_, table_info.table_id_, tags, topk, json_params, vector, result_ids,
+        stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, vector, result_ids,
                           result_distances);
         ASSERT_TRUE(stat.ok());
         ASSERT_EQ(result_ids[0], id);
@@ -496,7 +496,7 @@ TEST_F(SearchByIdTest, BINARY) {
         result_ids.clear();
         result_distances.clear();
 
-        stat = db_->QueryByID(dummy_context_, table_info.table_id_, tags, topk, json_params, id, result_ids,
+        stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids,
                               result_distances);
         ASSERT_TRUE(stat.ok());
         ASSERT_EQ(result_ids[0], id);
diff --git a/core/unittest/db/test_wal.cpp b/core/unittest/db/test_wal.cpp
index 8746c2cbf3..2b67185b29 100644
--- a/core/unittest/db/test_wal.cpp
+++ b/core/unittest/db/test_wal.cpp
@@ -53,13 +53,13 @@ class TestWalMeta : public SqliteMetaImpl {
     }
 
     Status
-    CreateTable(TableSchema& table_schema) override {
+    CreateTable(CollectionSchema& table_schema) override {
         tables_.push_back(table_schema);
         return Status::OK();
     }
 
     Status
-    AllTables(std::vector<TableSchema>& table_schema_array) override {
+    AllTables(std::vector<CollectionSchema>& table_schema_array) override {
         table_schema_array = tables_;
         return Status::OK();
     }
@@ -77,7 +77,7 @@ class TestWalMeta : public SqliteMetaImpl {
     }
 
 private:
-    std::vector<TableSchema> tables_;
+    std::vector<CollectionSchema> tables_;
     uint64_t global_lsn_ = 0;
 };
 
@@ -87,7 +87,7 @@ class TestWalMetaError : public SqliteMetaImpl {
     }
 
     Status
-    AllTables(std::vector<TableSchema>& table_schema_array) override {
+    AllTables(std::vector<CollectionSchema>& table_schema_array) override {
         return Status(DB_ERROR, "error");
     }
 };
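The WAL hunks that follow rename the table_id field of the log record to collection_id. From the fields exercised in BUFFER_TEST below, the record plausibly looks like this (a hedged reconstruction; the real definition in the WAL headers may differ in types and carry further members):

    // Hypothetical shape of the record appended to / read from the WAL buffer.
    struct MXLogRecord {
        uint64_t lsn = 0;                              // assigned on append
        milvus::engine::wal::MXLogType type;           // InsertVector/InsertBinary/Delete/Flush/None
        std::string collection_id;                     // was table_id before the rename
        std::string partition_tag;
        uint32_t length = 0;                           // number of entity ids
        const milvus::engine::IDNumber* ids = nullptr;
        uint32_t data_size = 0;                        // payload bytes for inserts
        const void* data = nullptr;                    // assumed companion to data_size
    };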
@@ -275,7 +275,7 @@ TEST(WalTest, BUFFER_TEST) {
 
     // write 0
     record[0].type = milvus::engine::wal::MXLogType::InsertVector;
-    record[0].table_id = "insert_table";
+    record[0].collection_id = "insert_table";
     record[0].partition_tag = "parti1";
     record[0].length = 50;
     record[0].ids = (milvus::engine::IDNumber*)malloc(record[0].length * sizeof(milvus::engine::IDNumber));
@@ -287,7 +287,7 @@ TEST(WalTest, BUFFER_TEST) {
 
     // write 1
     record[1].type = milvus::engine::wal::MXLogType::Delete;
-    record[1].table_id = "insert_table";
+    record[1].collection_id = "insert_table";
     record[1].partition_tag = "parti1";
     record[1].length = 10;
     record[1].ids = (milvus::engine::IDNumber*)malloc(record[0].length * sizeof(milvus::engine::IDNumber));
@@ -300,7 +300,7 @@ TEST(WalTest, BUFFER_TEST) {
     // read 0
     ASSERT_EQ(buffer.Next(record[1].lsn, read_rst), milvus::WAL_SUCCESS);
     ASSERT_EQ(read_rst.type, record[0].type);
-    ASSERT_EQ(read_rst.table_id, record[0].table_id);
+    ASSERT_EQ(read_rst.collection_id, record[0].collection_id);
     ASSERT_EQ(read_rst.partition_tag, record[0].partition_tag);
     ASSERT_EQ(read_rst.length, record[0].length);
     ASSERT_EQ(memcmp(read_rst.ids, record[0].ids, read_rst.length * sizeof(milvus::engine::IDNumber)), 0);
@@ -310,7 +310,7 @@ TEST(WalTest, BUFFER_TEST) {
     // read 1
     ASSERT_EQ(buffer.Next(record[1].lsn, read_rst), milvus::WAL_SUCCESS);
     ASSERT_EQ(read_rst.type, record[1].type);
-    ASSERT_EQ(read_rst.table_id, record[1].table_id);
+    ASSERT_EQ(read_rst.collection_id, record[1].collection_id);
     ASSERT_EQ(read_rst.partition_tag, record[1].partition_tag);
     ASSERT_EQ(read_rst.length, record[1].length);
     ASSERT_EQ(memcmp(read_rst.ids, record[1].ids, read_rst.length * sizeof(milvus::engine::IDNumber)), 0);
@@ -323,7 +323,7 @@ TEST(WalTest, BUFFER_TEST) {
 
     // write 2 (new file)
     record[2].type = milvus::engine::wal::MXLogType::InsertVector;
-    record[2].table_id = "insert_table";
+    record[2].collection_id = "insert_table";
     record[2].partition_tag = "parti1";
     record[2].length = 50;
     record[2].ids = (milvus::engine::IDNumber*)malloc(record[2].length * sizeof(milvus::engine::IDNumber));
@@ -335,7 +335,7 @@ TEST(WalTest, BUFFER_TEST) {
 
     // write 3 (new file)
     record[3].type = milvus::engine::wal::MXLogType::InsertBinary;
-    record[3].table_id = "insert_table";
+    record[3].collection_id = "insert_table";
     record[3].partition_tag = "parti1";
     record[3].length = 100;
     record[3].ids = (milvus::engine::IDNumber*)malloc(record[3].length * sizeof(milvus::engine::IDNumber));
@@ -357,7 +357,7 @@ TEST(WalTest, BUFFER_TEST) {
     // read 2
     ASSERT_EQ(buffer.Next(record[3].lsn, read_rst), milvus::WAL_SUCCESS);
     ASSERT_EQ(read_rst.type, record[2].type);
-    ASSERT_EQ(read_rst.table_id, record[2].table_id);
+    ASSERT_EQ(read_rst.collection_id, record[2].collection_id);
     ASSERT_EQ(read_rst.partition_tag, record[2].partition_tag);
     ASSERT_EQ(read_rst.length, record[2].length);
     ASSERT_EQ(memcmp(read_rst.ids, record[2].ids, read_rst.length * sizeof(milvus::engine::IDNumber)), 0);
@@ -367,7 +367,7 @@ TEST(WalTest, BUFFER_TEST) {
     // read 3
     ASSERT_EQ(buffer.Next(record[3].lsn, read_rst), milvus::WAL_SUCCESS);
     ASSERT_EQ(read_rst.type, record[3].type);
-    ASSERT_EQ(read_rst.table_id, record[3].table_id);
+    ASSERT_EQ(read_rst.collection_id, record[3].collection_id);
     ASSERT_EQ(read_rst.partition_tag, record[3].partition_tag);
     ASSERT_EQ(read_rst.length, record[3].length);
     ASSERT_EQ(memcmp(read_rst.ids, record[3].ids, read_rst.length * sizeof(milvus::engine::IDNumber)), 0);
@@ -382,7 +382,7 @@ TEST(WalTest, BUFFER_TEST) {
     ASSERT_EQ(buffer.Append(empty), milvus::WAL_SUCCESS);
     ASSERT_EQ(buffer.Next(empty.lsn, read_rst), milvus::WAL_SUCCESS);
     ASSERT_EQ(read_rst.type, milvus::engine::wal::MXLogType::None);
-    ASSERT_TRUE(read_rst.table_id.empty());
+    ASSERT_TRUE(read_rst.collection_id.empty());
     ASSERT_TRUE(read_rst.partition_tag.empty());
     ASSERT_EQ(read_rst.length, 0);
     ASSERT_EQ(read_rst.data_size, 0);
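The MANAGER_INIT_TEST hunk below seeds each collection's flush_lsn_ with expressions like (uint64_t)1 << 32 | 60. Assuming the WAL packs a log-file number into the high 32 bits and an in-file offset into the low 32 bits (an inference from these expressions, not something the diff states), the encoding round-trips like this:

    #include <cstdint>

    // Hypothetical helpers matching the ((uint64_t)file_no << 32 | offset) pattern.
    constexpr uint64_t PackLsn(uint32_t file_no, uint32_t offset) {
        return (static_cast<uint64_t>(file_no) << 32) | offset;
    }
    constexpr uint32_t LsnFileNo(uint64_t lsn) { return static_cast<uint32_t>(lsn >> 32); }
    constexpr uint32_t LsnOffset(uint64_t lsn) { return static_cast<uint32_t>(lsn); }

    static_assert(PackLsn(1, 60) == ((uint64_t)1 << 32 | 60), "same encoding as the test");
    static_assert(LsnFileNo(PackLsn(2, 40)) == 2, "file number in the high word");
    static_assert(LsnOffset(PackLsn(2, 40)) == 40, "offset in the low word");

Under that reading, table1 (file 1, offset 60) is flushed further than table2 (file 1, offset 20), and table3 (file 2, offset 40) is the furthest along.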
@@ -419,18 +419,18 @@ TEST(WalTest, MANAGER_INIT_TEST) {
     milvus::engine::DBMetaOptions opt = {WAL_GTEST_PATH};
     milvus::engine::meta::MetaPtr meta = std::make_shared(opt);
 
-    milvus::engine::meta::TableSchema table_schema_1;
-    table_schema_1.table_id_ = "table1";
+    milvus::engine::meta::CollectionSchema table_schema_1;
+    table_schema_1.collection_id_ = "table1";
     table_schema_1.flush_lsn_ = (uint64_t)1 << 32 | 60;
     meta->CreateTable(table_schema_1);
 
-    milvus::engine::meta::TableSchema table_schema_2;
-    table_schema_2.table_id_ = "table2";
+    milvus::engine::meta::CollectionSchema table_schema_2;
+    table_schema_2.collection_id_ = "table2";
     table_schema_2.flush_lsn_ = (uint64_t)1 << 32 | 20;
     meta->CreateTable(table_schema_2);
 
-    milvus::engine::meta::TableSchema table_schema_3;
-    table_schema_3.table_id_ = "table3";
+    milvus::engine::meta::CollectionSchema table_schema_3;
+    table_schema_3.collection_id_ = "table3";
     table_schema_3.flush_lsn_ = (uint64_t)2 << 32 | 40;
     meta->CreateTable(table_schema_3);
 
@@ -465,8 +465,8 @@ TEST(WalTest, MANAGER_APPEND_FAILED) {
     milvus::engine::DBMetaOptions opt = {WAL_GTEST_PATH};
     milvus::engine::meta::MetaPtr meta = std::make_shared(opt);
 
-    milvus::engine::meta::TableSchema schema;
-    schema.table_id_ = "table1";
+    milvus::engine::meta::CollectionSchema schema;
+    schema.collection_id_ = "table1";
     schema.flush_lsn_ = 0;
     meta->CreateTable(schema);
 
@@ -485,12 +485,12 @@ TEST(WalTest, MANAGER_APPEND_FAILED) {
     std::vector ids(1, 0);
     std::vector data_float(1024, 0);
 
-    ASSERT_FALSE(manager->Insert(schema.table_id_, "", ids, data_float));
+    ASSERT_FALSE(manager->Insert(schema.collection_id_, "", ids, data_float));
 
     ids.clear();
     data_float.clear();
-    ASSERT_FALSE(manager->Insert(schema.table_id_, "", ids, data_float));
-    ASSERT_FALSE(manager->DeleteById(schema.table_id_, ids));
+    ASSERT_FALSE(manager->Insert(schema.collection_id_, "", ids, data_float));
+    ASSERT_FALSE(manager->DeleteById(schema.collection_id_, ids));
 }
 
 TEST(WalTest, MANAGER_RECOVERY_TEST) {
@@ -508,15 +508,15 @@ TEST(WalTest, MANAGER_RECOVERY_TEST) {
     manager = std::make_shared(wal_config);
     ASSERT_EQ(manager->Init(meta), milvus::WAL_SUCCESS);
 
-    milvus::engine::meta::TableSchema schema;
-    schema.table_id_ = "table";
+    milvus::engine::meta::CollectionSchema schema;
+    schema.collection_id_ = "collection";
     schema.flush_lsn_ = 0;
     meta->CreateTable(schema);
 
     std::vector ids(1024, 0);
     std::vector data_float(1024 * 512, 0);
 
-    manager->CreateTable(schema.table_id_);
-    ASSERT_TRUE(manager->Insert(schema.table_id_, "", ids, data_float));
+    manager->CreateTable(schema.collection_id_);
+    ASSERT_TRUE(manager->Insert(schema.collection_id_, "", ids, data_float));
 
     // recovery
     manager = std::make_shared(wal_config);
@@ -529,7 +529,7 @@ TEST(WalTest, MANAGER_RECOVERY_TEST) {
             break;
         }
         ASSERT_EQ(record.type, milvus::engine::wal::MXLogType::InsertVector);
-        ASSERT_EQ(record.table_id, schema.table_id_);
+        ASSERT_EQ(record.collection_id, schema.collection_id_);
         ASSERT_EQ(record.partition_tag, "");
     }
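MANAGER_RECOVERY_TEST above and MANAGER_TEST below share one consumption pattern: a consumer drains GetNextRecord until it sees MXLogType::None (log exhausted) or a Flush marker, and acknowledges a completed flush with TableFlushed. Condensed from the calls that appear in these tests (error handling elided; the apply step is a placeholder):

    // Sketch of the consumer loop the tests exercise.
    milvus::engine::wal::MXLogRecord record;
    while (manager->GetNextRecord(record) == milvus::WAL_SUCCESS) {
        if (record.type == milvus::engine::wal::MXLogType::None) {
            break;  // nothing left to replay
        }
        if (record.type == milvus::engine::wal::MXLogType::Flush) {
            // record.collection_id names the collection; empty means "flush all"
            manager->TableFlushed(record.collection_id, record.lsn);
            continue;
        }
        // otherwise apply the insert/delete carried by the record
    }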
@@ -604,18 +604,18 @@ TEST(WalTest, MANAGER_TEST) {
     while (1) {
         ASSERT_EQ(manager->GetNextRecord(record), milvus::WAL_SUCCESS);
         if (record.type == milvus::engine::wal::MXLogType::Flush) {
-            ASSERT_EQ(record.table_id, table_id_1);
+            ASSERT_EQ(record.collection_id, table_id_1);
             ASSERT_EQ(new_lsn, flush_lsn);
             manager->TableFlushed(table_id_1, new_lsn);
             break;
         } else {
             ASSERT_TRUE((record.type == milvus::engine::wal::MXLogType::InsertVector &&
-                         record.table_id == table_id_1) ||
+                         record.collection_id == table_id_1) ||
                         (record.type == milvus::engine::wal::MXLogType::Delete &&
-                         record.table_id == table_id_1) ||
+                         record.collection_id == table_id_1) ||
                         (record.type == milvus::engine::wal::MXLogType::InsertBinary &&
-                         record.table_id == table_id_2));
+                         record.collection_id == table_id_2));
             new_lsn = record.lsn;
         }
     }
@@ -626,7 +626,7 @@ TEST(WalTest, MANAGER_TEST) {
 
     ASSERT_EQ(manager->GetNextRecord(record), milvus::WAL_SUCCESS);
     ASSERT_EQ(record.type, milvus::engine::wal::MXLogType::Flush);
-    ASSERT_EQ(record.table_id, table_id_2);
+    ASSERT_EQ(record.collection_id, table_id_2);
     manager->TableFlushed(table_id_2, flush_lsn);
 
     ASSERT_EQ(manager->Flush(table_id_2), 0);
@@ -636,7 +636,7 @@ TEST(WalTest, MANAGER_TEST) {
 
     ASSERT_EQ(manager->GetNextRecord(record), milvus::WAL_SUCCESS);
     ASSERT_EQ(record.type, milvus::engine::wal::MXLogType::Flush);
-    ASSERT_TRUE(record.table_id.empty());
+    ASSERT_TRUE(record.collection_id.empty());
 }
 
 TEST(WalTest, MANAGER_SAME_NAME_TABLE) {
@@ -674,7 +674,7 @@ TEST(WalTest, MANAGER_SAME_NAME_TABLE) {
     ASSERT_TRUE(manager->DeleteById(table_id_1, ids));
     ASSERT_TRUE(manager->DeleteById(table_id_2, ids));
 
-    // re-create table
+    // re-create collection
     manager->DropTable(table_id_1);
     manager->CreateTable(table_id_1);
 
@@ -684,7 +684,7 @@ TEST(WalTest, MANAGER_SAME_NAME_TABLE) {
         if (record.type == milvus::engine::wal::MXLogType::None) {
             break;
         }
-        ASSERT_EQ(record.table_id, table_id_2);
+        ASSERT_EQ(record.collection_id, table_id_2);
     }
 }
diff --git a/core/unittest/metrics/test_metrics.cpp b/core/unittest/metrics/test_metrics.cpp
index ed74aefddf..cb3fd93fe2 100644
--- a/core/unittest/metrics/test_metrics.cpp
+++ b/core/unittest/metrics/test_metrics.cpp
@@ -70,13 +70,13 @@ TEST_F(MetricTest, METRIC_TEST) {
     static const char* group_name = "test_group";
     static const int group_dim = 256;
 
-    milvus::engine::meta::TableSchema group_info;
+    milvus::engine::meta::CollectionSchema group_info;
     group_info.dimension_ = group_dim;
-    group_info.table_id_ = group_name;
+    group_info.collection_id_ = group_name;
     auto stat = db_->CreateTable(group_info);
 
-    milvus::engine::meta::TableSchema group_info_get;
-    group_info_get.table_id_ = group_name;
+    milvus::engine::meta::CollectionSchema group_info_get;
+    group_info_get.collection_id_ = group_name;
     stat = db_->DescribeTable(group_info_get);
 
     int nb = 50;
@@ -157,13 +157,13 @@ TEST_F(MetricTest, COLLECTOR_METRICS_TEST) {
 
     milvus::server::CollectAddMetrics add_metrics(10, 128);
 
-    milvus::server::CollectDurationMetrics duration_metrics_raw(milvus::engine::meta::TableFileSchema::RAW);
-    milvus::server::CollectDurationMetrics duration_metrics_index(milvus::engine::meta::TableFileSchema::TO_INDEX);
-    milvus::server::CollectDurationMetrics duration_metrics_delete(milvus::engine::meta::TableFileSchema::TO_DELETE);
+    milvus::server::CollectDurationMetrics duration_metrics_raw(milvus::engine::meta::SegmentSchema::RAW);
+    milvus::server::CollectDurationMetrics duration_metrics_index(milvus::engine::meta::SegmentSchema::TO_INDEX);
+    milvus::server::CollectDurationMetrics duration_metrics_delete(milvus::engine::meta::SegmentSchema::TO_DELETE);
 
-    milvus::server::CollectSearchTaskMetrics search_metrics_raw(milvus::engine::meta::TableFileSchema::RAW);
-    milvus::server::CollectSearchTaskMetrics search_metrics_index(milvus::engine::meta::TableFileSchema::TO_INDEX);
-    milvus::server::CollectSearchTaskMetrics search_metrics_delete(milvus::engine::meta::TableFileSchema::TO_DELETE);
+    milvus::server::CollectSearchTaskMetrics search_metrics_raw(milvus::engine::meta::SegmentSchema::RAW);
+    milvus::server::CollectSearchTaskMetrics search_metrics_index(milvus::engine::meta::SegmentSchema::TO_INDEX);
+    milvus::server::CollectSearchTaskMetrics search_metrics_delete(milvus::engine::meta::SegmentSchema::TO_DELETE);
 
     milvus::server::MetricCollector metric_collector();
 }
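The collector types above follow a scope-based (RAII) pattern: construction stamps a start time, and the destructor publishes the elapsed duration under a label derived from the segment state (RAW, TO_INDEX, TO_DELETE). A hedged sketch of the idiom with hypothetical member names (the real classes live under core/src/metrics and publish to the metrics backend instead of a stub):

    #include <chrono>

    class CollectDurationMetrics {
     public:
        explicit CollectDurationMetrics(int file_state)
            : state_(file_state), start_(std::chrono::system_clock::now()) {}
        ~CollectDurationMetrics() {
            auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                          std::chrono::system_clock::now() - start_).count();
            Publish(state_, us);  // hypothetical sink
        }
     private:
        static void Publish(int /*state*/, long long /*us*/) {}
        int state_;
        std::chrono::system_clock::time_point start_;
    };

Incidentally, the final line of that hunk, milvus::server::MetricCollector metric_collector();, declares a function rather than a local object (the most vexing parse), so that particular collector is never actually constructed.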
diff --git a/core/unittest/scheduler/test_job.cpp b/core/unittest/scheduler/test_job.cpp
index a1a37b2591..07aa8343f0 100644
--- a/core/unittest/scheduler/test_job.cpp
+++ b/core/unittest/scheduler/test_job.cpp
@@ -32,7 +32,7 @@ TEST(JobTest, TestJob) {
     TestJob test_job;
     test_job.Dump();
 
-    auto delete_ptr = std::make_shared("table_id", nullptr, 1);
+    auto delete_ptr = std::make_shared("collection_id", nullptr, 1);
     delete_ptr->Dump();
 
     engine::VectorsData vectors;
diff --git a/core/unittest/scheduler/test_optimizer.cpp b/core/unittest/scheduler/test_optimizer.cpp
index 7d04124786..0757fc235c 100644
--- a/core/unittest/scheduler/test_optimizer.cpp
+++ b/core/unittest/scheduler/test_optimizer.cpp
@@ -61,7 +61,7 @@ TEST(OptimizerTest, TEST_OPTIMIZER) {
     fiu_disable("get_gpu_config_search_resources.disable_gpu_resource_fail");
     fiu_disable("check_config_gpu_search_threshold_fail");
 
-    auto file = std::make_shared();
+    auto file = std::make_shared();
     file->engine_type_ = (int)engine::EngineType::FAISS_IVFFLAT;
     file->index_params_ = "{ \"nlist\": 100 }";
     file->dimension_ = 64;
diff --git a/core/unittest/scheduler/test_resource.cpp b/core/unittest/scheduler/test_resource.cpp
index af3ed9d8f4..01641f2655 100644
--- a/core/unittest/scheduler/test_resource.cpp
+++ b/core/unittest/scheduler/test_resource.cpp
@@ -154,7 +154,7 @@ class ResourceAdvanceTest : public testing::Test {
 TEST_F(ResourceAdvanceTest, DISK_RESOURCE_TEST) {
     const uint64_t NUM = max_once_load;
     std::vector> tasks;
-    TableFileSchemaPtr dummy = nullptr;
+    SegmentSchemaPtr dummy = nullptr;
     for (uint64_t i = 0; i < NUM; ++i) {
         auto label = std::make_shared(disk_resource_);
         auto task = std::make_shared(std::make_shared("dummy_request_id"), dummy, label);
@@ -182,7 +182,7 @@ TEST_F(ResourceAdvanceTest, DISK_RESOURCE_TEST) {
 TEST_F(ResourceAdvanceTest, CPU_RESOURCE_TEST) {
     const uint64_t NUM = max_once_load;
     std::vector> tasks;
-    TableFileSchemaPtr dummy = nullptr;
+    SegmentSchemaPtr dummy = nullptr;
     for (uint64_t i = 0; i < NUM; ++i) {
         auto label = std::make_shared(cpu_resource_);
         auto task = std::make_shared(std::make_shared("dummy_request_id"), dummy, label);
@@ -216,7 +216,7 @@ TEST_F(ResourceAdvanceTest, CPU_RESOURCE_TEST) {
 TEST_F(ResourceAdvanceTest, GPU_RESOURCE_TEST) {
     const uint64_t NUM = max_once_load;
     std::vector> tasks;
-    TableFileSchemaPtr dummy = nullptr;
+    SegmentSchemaPtr dummy = nullptr;
     for (uint64_t i = 0; i < NUM; ++i) {
         auto label = std::make_shared(gpu_resource_);
         auto task = std::make_shared(std::make_shared("dummy_request_id"), dummy, label);
@@ -244,7 +244,7 @@ TEST_F(ResourceAdvanceTest, GPU_RESOURCE_TEST) {
 TEST_F(ResourceAdvanceTest, TEST_RESOURCE_TEST) {
     const uint64_t NUM = max_once_load;
     std::vector> tasks;
-    TableFileSchemaPtr dummy = nullptr;
+    SegmentSchemaPtr dummy = nullptr;
     for (uint64_t i = 0; i < NUM; ++i) {
         auto label = std::make_shared(test_resource_);
         auto task = std::make_shared(std::make_shared("dummy_request_id"), dummy, label);
diff --git a/core/unittest/scheduler/test_resource_mgr.cpp b/core/unittest/scheduler/test_resource_mgr.cpp
index 3ab5af76e8..39c63f7fc8 100644
--- a/core/unittest/scheduler/test_resource_mgr.cpp
+++ b/core/unittest/scheduler/test_resource_mgr.cpp
@@ -196,7 +196,7 @@ TEST_F(ResourceMgrAdvanceTest, REGISTER_SUBSCRIBER) {
         flag = true;
     };
     mgr1_->RegisterSubscriber(callback);
-    TableFileSchemaPtr dummy = nullptr;
+    SegmentSchemaPtr dummy = nullptr;
     disk_res->task_table().Put(
         std::make_shared(std::make_shared("dummy_request_id"), dummy, nullptr));
     sleep(1);
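The scheduler hunks replace TableFileSchemaPtr with SegmentSchemaPtr throughout, the companion alias rename to the TableFileSchema -> SegmentSchema change in the metrics hunk above. A plausible sketch of the alias (the exact namespace and qualification in the real headers may differ):

    // Hypothetical alias, mirroring the old
    //   using TableFileSchemaPtr = std::shared_ptr<meta::TableFileSchema>;
    namespace milvus {
    namespace scheduler {
    using SegmentSchemaPtr = std::shared_ptr<engine::meta::SegmentSchema>;
    }  // namespace scheduler
    }  // namespace milvus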
diff --git a/core/unittest/scheduler/test_scheduler.cpp b/core/unittest/scheduler/test_scheduler.cpp
index 5b5d8bb1bf..75d08a731e 100644
--- a/core/unittest/scheduler/test_scheduler.cpp
+++ b/core/unittest/scheduler/test_scheduler.cpp
@@ -194,7 +194,7 @@ class SchedulerTest2 : public testing::Test {
 // TEST_F(SchedulerTest2, SPECIFIED_RESOURCE_TEST) {
 //    const uint64_t NUM = 2;
 //    std::vector> tasks;
-//    TableFileSchemaPtr dummy = std::make_shared();
+//    SegmentSchemaPtr dummy = std::make_shared();
 //    dummy->location_ = "location";
 //
 //    for (uint64_t i = 0; i < NUM; ++i) {
diff --git a/core/unittest/scheduler/test_task.cpp b/core/unittest/scheduler/test_task.cpp
index 6079e537d7..8084c3aad1 100644
--- a/core/unittest/scheduler/test_task.cpp
+++ b/core/unittest/scheduler/test_task.cpp
@@ -35,7 +35,7 @@ TEST(TaskTest, INVALID_INDEX) {
     auto trace_context = std::make_shared(mock_span);
     dummy_context->SetTraceContext(trace_context);
 
-    TableFileSchemaPtr dummy_file = std::make_shared();
+    SegmentSchemaPtr dummy_file = std::make_shared();
     dummy_file->index_params_ = "{ \"nlist\": 16384 }";
     dummy_file->dimension_ = 64;
     auto search_task =
@@ -51,7 +51,7 @@ TEST(TaskTest, INVALID_INDEX) {
 
 TEST(TaskTest, TEST_TASK) {
     auto dummy_context = std::make_shared("dummy_request_id");
 
-    auto file = std::make_shared();
+    auto file = std::make_shared();
     file->index_params_ = "{ \"nlist\": 16384 }";
     file->dimension_ = 64;
     auto label = std::make_shared();
@@ -74,7 +74,7 @@ TEST(TaskTest, TEST_TASK) {
 
     options.insert_cache_immediately_ = true;
     auto meta_ptr = std::make_shared(options.meta_);
-    file->table_id_ = "111";
+    file->collection_id_ = "111";
     file->location_ = "/tmp/milvus_test/index_file1.txt";
     auto build_index_job = std::make_shared(meta_ptr, options);
     XBuildIndexTask build_index_task(file, label);
diff --git a/core/unittest/scheduler/test_tasktable.cpp b/core/unittest/scheduler/test_tasktable.cpp
index 89a719b220..c7b4691a1e 100644
--- a/core/unittest/scheduler/test_tasktable.cpp
+++ b/core/unittest/scheduler/test_tasktable.cpp
@@ -154,7 +154,7 @@ class TaskTableBaseTest : public ::testing::Test {
 protected:
     void
     SetUp() override {
-        milvus::scheduler::TableFileSchemaPtr dummy = nullptr;
+        milvus::scheduler::SegmentSchemaPtr dummy = nullptr;
         invalid_task_ = nullptr;
         task1_ = std::make_shared(
             std::make_shared("dummy_request_id"), dummy, nullptr);
@@ -313,7 +313,7 @@ class TaskTableAdvanceTest : public ::testing::Test {
 protected:
     void
     SetUp() override {
-        milvus::scheduler::TableFileSchemaPtr dummy = nullptr;
+        milvus::scheduler::SegmentSchemaPtr dummy = nullptr;
         for (uint64_t i = 0; i < 8; ++i) {
             auto task = std::make_shared(
                 std::make_shared("dummy_request_id"), dummy, nullptr);
diff --git a/core/unittest/server/test_rpc.cpp b/core/unittest/server/test_rpc.cpp
index 6fd5a5da83..9cae71ecc5 100644
--- a/core/unittest/server/test_rpc.cpp
+++ b/core/unittest/server/test_rpc.cpp
@@ -103,7 +103,7 @@ class RpcHandlerTest : public testing::Test {
 
         milvus::server::DBWrapper::GetInstance().StartService();
 
-        // initialize handler, create table
+        // initialize handler, create collection
         handler = std::make_shared(opentracing::Tracer::Global());
         dummy_context = std::make_shared("dummy_request_id");
         opentracing::mocktracer::MockTracerOptions tracer_options;
@@ -261,41 +261,41 @@ TEST_F(RpcHandlerTest, INDEX_TEST) {
     fiu_disable("CreateIndexRequest.OnExecute.ip_meteric");
 #endif
 
-    ::milvus::grpc::TableName table_name;
+    ::milvus::grpc::TableName collection_name;
     ::milvus::grpc::IndexParam index_param;
-    handler->DescribeIndex(&context, &table_name, &index_param);
-    table_name.set_table_name("test4");
-    handler->DescribeIndex(&context, &table_name, &index_param);
-    table_name.set_table_name(TABLE_NAME);
-    handler->DescribeIndex(&context, &table_name, &index_param);
+    handler->DescribeIndex(&context, &collection_name, &index_param);
+    collection_name.set_table_name("test4");
+    handler->DescribeIndex(&context, &collection_name, &index_param);
+    collection_name.set_table_name(TABLE_NAME);
+    handler->DescribeIndex(&context, &collection_name, &index_param);
 
     fiu_init(0);
     fiu_enable("DescribeIndexRequest.OnExecute.throw_std_exception", 1, NULL, 0);
-    handler->DescribeIndex(&context, &table_name, &index_param);
+    handler->DescribeIndex(&context, &collection_name, &index_param);
     fiu_disable("DescribeIndexRequest.OnExecute.throw_std_exception");
 
     ::milvus::grpc::Status status;
-    table_name.Clear();
-    handler->DropIndex(&context, &table_name, &status);
-    table_name.set_table_name("test5");
-    handler->DropIndex(&context, &table_name, &status);
+    collection_name.Clear();
+    handler->DropIndex(&context, &collection_name, &status);
+    collection_name.set_table_name("test5");
+    handler->DropIndex(&context, &collection_name, &status);
 
-    table_name.set_table_name(TABLE_NAME);
+    collection_name.set_table_name(TABLE_NAME);
 
     fiu_init(0);
     fiu_enable("DropIndexRequest.OnExecute.table_not_exist", 1, NULL, 0);
-    handler->DropIndex(&context, &table_name, &status);
+    handler->DropIndex(&context, &collection_name, &status);
     fiu_disable("DropIndexRequest.OnExecute.table_not_exist");
 
     fiu_enable("DropIndexRequest.OnExecute.drop_index_fail", 1, NULL, 0);
-    handler->DropIndex(&context, &table_name, &status);
+    handler->DropIndex(&context, &collection_name, &status);
     fiu_disable("DropIndexRequest.OnExecute.drop_index_fail");
 
     fiu_enable("DropIndexRequest.OnExecute.throw_std_exception", 1, NULL, 0);
-    handler->DropIndex(&context, &table_name, &status);
+    handler->DropIndex(&context, &collection_name, &status);
     fiu_disable("DropIndexRequest.OnExecute.throw_std_exception");
 
-    handler->DropIndex(&context, &table_name, &status);
+    handler->DropIndex(&context, &collection_name, &status);
 }
 
 TEST_F(RpcHandlerTest, INSERT_TEST) {
@@ -376,10 +376,10 @@ TEST_F(RpcHandlerTest, SEARCH_TEST) {
 
     // test null input
     handler->Search(&context, nullptr, &response);
 
-    // test invalid table name
+    // test invalid collection name
     handler->Search(&context, &request, &response);
 
-    // test table not exist
+    // test collection not exist
     request.set_table_name("test3");
     handler->Search(&context, &request, &response);
 
@@ -644,12 +644,12 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
     ::milvus::grpc::Status response;
     std::string tablename = "tbl";
 
-    // create table test
+    // create collection test
     // test null input
     handler->CreateTable(&context, nullptr, &response);
-    // test invalid table name
+    // test invalid collection name
     handler->CreateTable(&context, &tableschema, &response);
-    // test invalid table dimension
+    // test invalid collection dimension
     tableschema.set_table_name(tablename);
     handler->CreateTable(&context, &tableschema, &response);
     // test invalid index file size
@@ -658,27 +658,27 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
     // test invalid index metric type
     tableschema.set_index_file_size(INDEX_FILE_SIZE);
     handler->CreateTable(&context, &tableschema, &response);
-    // test table already exist
+    // test collection already exist
     tableschema.set_metric_type(1);
     handler->CreateTable(&context, &tableschema, &response);
 
-    // describe table test
-    // test invalid table name
-    ::milvus::grpc::TableName table_name;
+    // describe collection test
+    // test invalid collection name
+    ::milvus::grpc::TableName collection_name;
     ::milvus::grpc::TableSchema table_schema;
-    handler->DescribeTable(&context, &table_name, &table_schema);
+    handler->DescribeTable(&context, &collection_name, &table_schema);
-    table_name.set_table_name(TABLE_NAME);
-    ::grpc::Status status = handler->DescribeTable(&context, &table_name, &table_schema);
+    collection_name.set_table_name(TABLE_NAME);
+    ::grpc::Status status = handler->DescribeTable(&context, &collection_name, &table_schema);
     ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
 
     fiu_init(0);
     fiu_enable("DescribeTableRequest.OnExecute.describe_table_fail", 1, NULL, 0);
-    handler->DescribeTable(&context, &table_name, &table_schema);
+    handler->DescribeTable(&context, &collection_name, &table_schema);
     fiu_disable("DescribeTableRequest.OnExecute.describe_table_fail");
 
     fiu_enable("DescribeTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
-    handler->DescribeTable(&context, &table_name, &table_schema);
+    handler->DescribeTable(&context, &collection_name, &table_schema);
     fiu_disable("DescribeTableRequest.OnExecute.throw_std_exception");
 
     ::milvus::grpc::InsertParam request;
@@ -689,7 +689,7 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
         vector_ids.add_vector_id_array(i);
     }
     // Insert vectors
-    // test invalid table name
+    // test invalid collection name
     handler->Insert(&context, &request, &vector_ids);
     request.set_table_name(tablename);
     // test empty row record
@@ -722,9 +722,9 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
     status = handler->ShowTables(&context, &cmd, &table_name_list);
     ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
 
-    // show table info
+    // show collection info
     ::milvus::grpc::TableInfo table_info;
-    status = handler->ShowTableInfo(&context, &table_name, &table_info);
+    status = handler->ShowTableInfo(&context, &collection_name, &table_info);
     ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
 
     fiu_init(0);
@@ -732,40 +732,40 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
     handler->ShowTables(&context, &cmd, &table_name_list);
     fiu_disable("ShowTablesRequest.OnExecute.show_tables_fail");
 
-    // Count Table
+    // Count Collection
     ::milvus::grpc::TableRowCount count;
-    table_name.Clear();
-    status = handler->CountTable(&context, &table_name, &count);
-    table_name.set_table_name(tablename);
-    status = handler->CountTable(&context, &table_name, &count);
+    collection_name.Clear();
+    status = handler->CountTable(&context, &collection_name, &count);
+    collection_name.set_table_name(tablename);
+    status = handler->CountTable(&context, &collection_name, &count);
     ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
     //    ASSERT_EQ(count.table_row_count(), vector_ids.vector_id_array_size());
 
     fiu_init(0);
     fiu_enable("CountTableRequest.OnExecute.db_not_found", 1, NULL, 0);
-    status = handler->CountTable(&context, &table_name, &count);
+    status = handler->CountTable(&context, &collection_name, &count);
     fiu_disable("CountTableRequest.OnExecute.db_not_found");
 
     fiu_enable("CountTableRequest.OnExecute.status_error", 1, NULL, 0);
-    status = handler->CountTable(&context, &table_name, &count);
+    status = handler->CountTable(&context, &collection_name, &count);
     fiu_disable("CountTableRequest.OnExecute.status_error");
 
     fiu_enable("CountTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
-    status = handler->CountTable(&context, &table_name, &count);
+    status = handler->CountTable(&context, &collection_name, &count);
     fiu_disable("CountTableRequest.OnExecute.throw_std_exception");
 
-    // Preload Table
-    table_name.Clear();
-    status = handler->PreloadTable(&context, &table_name, &response);
-    table_name.set_table_name(TABLE_NAME);
-    status = handler->PreloadTable(&context, &table_name, &response);
+    // Preload Collection
+    collection_name.Clear();
+    status = handler->PreloadTable(&context, &collection_name, &response);
+    collection_name.set_table_name(TABLE_NAME);
+    status = handler->PreloadTable(&context, &collection_name, &response);
     ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
 
     fiu_enable("PreloadTableRequest.OnExecute.preload_table_fail", 1, NULL, 0);
-    handler->PreloadTable(&context, &table_name, &response);
+    handler->PreloadTable(&context, &collection_name, &response);
     fiu_disable("PreloadTableRequest.OnExecute.preload_table_fail");
 
     fiu_enable("PreloadTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
-    handler->PreloadTable(&context, &table_name, &response);
+    handler->PreloadTable(&context, &collection_name, &response);
     fiu_disable("PreloadTableRequest.OnExecute.throw_std_exception");
 
     fiu_init(0);
@@ -793,44 +793,44 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
     ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
     fiu_disable("CreateTableRequest.OnExecute.throw_std_exception");
 
-    // Drop table
-    table_name.set_table_name("");
-    // test invalid table name
-    ::grpc::Status grpc_status = handler->DropTable(&context, &table_name, &response);
-    table_name.set_table_name(tablename);
+    // Drop collection
+    collection_name.set_table_name("");
+    // test invalid collection name
+    ::grpc::Status grpc_status = handler->DropTable(&context, &collection_name, &response);
+    collection_name.set_table_name(tablename);
 
     fiu_enable("DropTableRequest.OnExecute.db_not_found", 1, NULL, 0);
-    handler->DropTable(&context, &table_name, &response);
+    handler->DropTable(&context, &collection_name, &response);
     ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
     fiu_disable("DropTableRequest.OnExecute.db_not_found");
 
     fiu_enable("DropTableRequest.OnExecute.describe_table_fail", 1, NULL, 0);
-    handler->DropTable(&context, &table_name, &response);
+    handler->DropTable(&context, &collection_name, &response);
     ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
     fiu_disable("DropTableRequest.OnExecute.describe_table_fail");
 
     fiu_enable("DropTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
-    handler->DropTable(&context, &table_name, &response);
+    handler->DropTable(&context, &collection_name, &response);
     ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
     fiu_disable("DropTableRequest.OnExecute.throw_std_exception");
 
-    grpc_status = handler->DropTable(&context, &table_name, &response);
+    grpc_status = handler->DropTable(&context, &collection_name, &response);
     ASSERT_EQ(grpc_status.error_code(), ::grpc::Status::OK.error_code());
     int error_code = response.error_code();
     ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS);
 
-    tableschema.set_table_name(table_name.table_name());
-    handler->DropTable(&context, &table_name, &response);
+    tableschema.set_table_name(collection_name.table_name());
+    handler->DropTable(&context, &collection_name, &response);
     sleep(1);
     handler->CreateTable(&context, &tableschema, &response);
     ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
 
     fiu_enable("DropTableRequest.OnExecute.drop_table_fail", 1, NULL, 0);
-    handler->DropTable(&context, &table_name, &response);
+    handler->DropTable(&context, &collection_name, &response);
     ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
     fiu_disable("DropTableRequest.OnExecute.drop_table_fail");
 
-    handler->DropTable(&context, &table_name, &response);
+    handler->DropTable(&context, &collection_name, &response);
 }
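A pattern worth calling out in these RPC tests: failure paths are driven by libfiu fault-injection points instead of constructing real failures; the test enables a named point, invokes the handler, and disables it again. On the code-under-test side, the matching hook looks roughly like this (the point name is one used above; the surrounding function is a sketch with a simplified return type):

    #include <fiu.h>

    bool DropIndexOnExecute() {
        bool collection_exists = true;
        // Runs the action only while a test has enabled this point via
        // fiu_enable("DropIndexRequest.OnExecute.table_not_exist", 1, NULL, 0).
        fiu_do_on("DropIndexRequest.OnExecute.table_not_exist", collection_exists = false);
        if (!collection_exists) {
            return false;  // would map to a SERVER_TABLE_NOT_EXIST status
        }
        return true;
    }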
 TEST_F(RpcHandlerTest, PARTITION_TEST) {
@@ -853,20 +853,20 @@ TEST_F(RpcHandlerTest, PARTITION_TEST) {
     handler->CreatePartition(&context, &partition_param, &response);
     ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
 
-    ::milvus::grpc::TableName table_name;
-    table_name.set_table_name(str_table_name);
+    ::milvus::grpc::TableName collection_name;
+    collection_name.set_table_name(str_table_name);
     ::milvus::grpc::PartitionList partition_list;
-    handler->ShowPartitions(&context, &table_name, &partition_list);
+    handler->ShowPartitions(&context, &collection_name, &partition_list);
     ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
     ASSERT_EQ(partition_list.partition_tag_array_size(), 2);
 
     fiu_init(0);
     fiu_enable("ShowPartitionsRequest.OnExecute.invalid_table_name", 1, NULL, 0);
-    handler->ShowPartitions(&context, &table_name, &partition_list);
+    handler->ShowPartitions(&context, &collection_name, &partition_list);
     fiu_disable("ShowPartitionsRequest.OnExecute.invalid_table_name");
 
     fiu_enable("ShowPartitionsRequest.OnExecute.show_partition_fail", 1, NULL, 0);
-    handler->ShowPartitions(&context, &table_name, &partition_list);
+    handler->ShowPartitions(&context, &collection_name, &partition_list);
     fiu_disable("ShowPartitionsRequest.OnExecute.show_partition_fail");
 
     fiu_init(0);
diff --git a/core/unittest/server/test_util.cpp b/core/unittest/server/test_util.cpp
index d8b786947c..428ca5770d 100644
--- a/core/unittest/server/test_util.cpp
+++ b/core/unittest/server/test_util.cpp
@@ -320,40 +320,40 @@ TEST(UtilTest, STATUS_TEST) {
 }
 
 TEST(ValidationUtilTest, VALIDATE_TABLENAME_TEST) {
-    std::string table_name = "Normal123_";
-    auto status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    std::string collection_name = "Normal123_";
+    auto status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_TRUE(status.ok());
 
-    table_name = "12sds";
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = "12sds";
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 
-    table_name = "";
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = "";
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 
-    table_name = "_asdasd";
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = "_asdasd";
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_SUCCESS);
 
-    table_name = "!@#!@";
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = "!@#!@";
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 
-    table_name = "_!@#!@";
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = "_!@#!@";
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 
-    table_name = "中文";
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = "中文";
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 
-    table_name = std::string(10000, 'a');
-    status = milvus::server::ValidationUtil::ValidateTableName(table_name);
+    collection_name = std::string(10000, 'a');
+    status = milvus::server::ValidationUtil::ValidateCollectionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 
-    table_name = "";
-    status = milvus::server::ValidationUtil::ValidatePartitionName(table_name);
+    collection_name = "";
+    status = milvus::server::ValidationUtil::ValidatePartitionName(collection_name);
     ASSERT_EQ(status.code(), milvus::SERVER_INVALID_TABLE_NAME);
 }
 
@@ -437,7 +437,7 @@ TEST(ValidationUtilTest, VALIDATE_INDEX_TEST) {
 }
 
 TEST(ValidationUtilTest, VALIDATE_INDEX_PARAMS_TEST) {
-    milvus::engine::meta::TableSchema table_schema;
+    milvus::engine::meta::CollectionSchema table_schema;
     table_schema.dimension_ = 64;
 
     milvus::json json_params = {};
@@ -578,7 +578,7 @@ TEST(ValidationUtilTest, VALIDATE_INDEX_PARAMS_TEST) {
 
 TEST(ValidationUtilTest, VALIDATE_SEARCH_PARAMS_TEST) {
     int64_t topk = 10;
-    milvus::engine::meta::TableSchema table_schema;
+    milvus::engine::meta::CollectionSchema table_schema;
     table_schema.dimension_ = 64;
 
     milvus::json json_params = {};
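The VALIDATE_TABLENAME_TEST cases above pin the naming rule down by example: a valid name starts with a letter or underscore, continues with letters, digits, or underscores, is non-empty, and stays under a length cap (the 10000-character case fails). A hedged reimplementation of the rule exactly as the tests describe it (the real ValidateCollectionName may use a different cap and returns a Status rather than bool; note the error code is still SERVER_INVALID_TABLE_NAME even after the rename):

    #include <cctype>
    #include <string>

    bool IsValidCollectionName(const std::string& name) {
        constexpr size_t kMaxLen = 255;  // assumed cap; the tests only show 10000 is too long
        if (name.empty() || name.size() > kMaxLen) {
            return false;
        }
        if (!(std::isalpha(static_cast<unsigned char>(name[0])) || name[0] == '_')) {
            return false;  // rejects "12sds" and "!@#!@"
        }
        for (char c : name) {
            if (!(std::isalnum(static_cast<unsigned char>(c)) || c == '_')) {
                return false;  // rejects "_!@#!@" and the non-ASCII bytes of "中文"
            }
        }
        return true;  // accepts "Normal123_" and "_asdasd"
    }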
diff --git a/core/unittest/server/test_web.cpp b/core/unittest/server/test_web.cpp
index 634d230109..93aa6c1ebe 100644
--- a/core/unittest/server/test_web.cpp
+++ b/core/unittest/server/test_web.cpp
@@ -1159,7 +1159,7 @@ TEST_F(WebControllerTest, INDEX) {
     response = client_ptr->dropIndex(collection_name, conncetion_ptr);
     ASSERT_EQ(OStatus::CODE_204.code, response->getStatusCode());
 
-    // create index without existing table
+    // create index without existing collection
     response = client_ptr->createIndex(collection_name + "fgafafafafafUUUUUUa124254", index_json.dump().c_str(),
                                        conncetion_ptr);
     ASSERT_EQ(OStatus::CODE_404.code, response->getStatusCode());
@@ -1198,7 +1198,7 @@ TEST_F(WebControllerTest, INDEX) {
     ASSERT_TRUE(nlist_json.is_number());
     ASSERT_EQ(10, nlist_json.get());
 
-    // get index of table which not exists
+    // get index of collection which not exists
     response = client_ptr->getIndex(collection_name + "dfaedXXXdfdfet4t343aa4", conncetion_ptr);
     ASSERT_EQ(OStatus::CODE_404.code, response->getStatusCode());
     auto error_dto = response->readBodyToDto(object_mapper.get());
diff --git a/sdk/grpc/ClientProxy.cpp b/sdk/grpc/ClientProxy.cpp
index 1e9dbba1ba..b3ef60fade 100644
--- a/sdk/grpc/ClientProxy.cpp
+++ b/sdk/grpc/ClientProxy.cpp
@@ -367,7 +367,7 @@ ClientProxy::DescribeCollection(const std::string& collection_name, CollectionParam& collection_param) {
 
         Status status = client_ptr_->DescribeTable(collection_name, grpc_schema);
 
-        collection_param.collection_name = grpc_schema.table_name();
+        collection_param.collection_name = grpc_schema.collection_name();
         collection_param.dimension = grpc_schema.dimension();
         collection_param.index_file_size = grpc_schema.index_file_size();
         collection_param.metric_type = static_cast<MetricType>(grpc_schema.metric_type());
@@ -400,7 +400,7 @@ ClientProxy::ShowCollections(std::vector<std::string>& collection_array) {
         collection_array.resize(collection_name_list.table_names_size());
         for (uint64_t i = 0; i < collection_name_list.table_names_size(); ++i) {
-            collection_array[i] = collection_name_list.table_names(i);
+            collection_array[i] = collection_name_list.collection_names(i);
         }
         return status;
     } catch (std::exception& ex) {
diff --git a/sdk/grpc/GrpcClient.cpp b/sdk/grpc/GrpcClient.cpp
index 96773f9c75..06cbf7b6dc 100644
--- a/sdk/grpc/GrpcClient.cpp
+++ b/sdk/grpc/GrpcClient.cpp
@@ -54,10 +54,10 @@ GrpcClient::CreateTable(const ::milvus::grpc::TableSchema& table_schema) {
 }
 
 bool
-GrpcClient::HasTable(const ::milvus::grpc::TableName& table_name, Status& status) {
+GrpcClient::HasTable(const ::milvus::grpc::TableName& collection_name, Status& status) {
     ClientContext context;
     ::milvus::grpc::BoolReply response;
-    ::grpc::Status grpc_status = stub_->HasTable(&context, table_name, &response);
+    ::grpc::Status grpc_status = stub_->HasTable(&context, collection_name, &response);
 
     if (!grpc_status.ok()) {
         std::cerr << "HasTable gRPC failed!" << std::endl;
@@ -72,10 +72,10 @@ GrpcClient::HasTable(const ::milvus::grpc::TableName& table_name, Status& status) {
 }
 
 Status
-GrpcClient::DropTable(const ::milvus::grpc::TableName& table_name) {
+GrpcClient::DropTable(const ::milvus::grpc::TableName& collection_name) {
     ClientContext context;
     grpc::Status response;
-    ::grpc::Status grpc_status = stub_->DropTable(&context, table_name, &response);
+    ::grpc::Status grpc_status = stub_->DropTable(&context, collection_name, &response);
 
     if (!grpc_status.ok()) {
         std::cerr << "DropTable gRPC failed!" << std::endl;
@@ -179,10 +179,10 @@ GrpcClient::Search(
 }
 
 Status
-GrpcClient::DescribeTable(const std::string& table_name, ::milvus::grpc::TableSchema& grpc_schema) {
+GrpcClient::DescribeTable(const std::string& collection_name, ::milvus::grpc::TableSchema& grpc_schema) {
     ClientContext context;
     ::milvus::grpc::TableName grpc_tablename;
-    grpc_tablename.set_table_name(table_name);
+    grpc_tablename.set_table_name(collection_name);
     ::grpc::Status grpc_status = stub_->DescribeTable(&context, grpc_tablename, &grpc_schema);
 
     if (!grpc_status.ok()) {
@@ -200,10 +200,10 @@ GrpcClient::DescribeTable(const std::string& table_name, ::milvus::grpc::TableSchema& grpc_schema) {
 }
 
 int64_t
-GrpcClient::CountTable(grpc::TableName& table_name, Status& status) {
+GrpcClient::CountTable(grpc::TableName& collection_name, Status& status) {
     ClientContext context;
     ::milvus::grpc::TableRowCount response;
-    ::grpc::Status grpc_status = stub_->CountTable(&context, table_name, &response);
+    ::grpc::Status grpc_status = stub_->CountTable(&context, collection_name, &response);
 
     if (!grpc_status.ok()) {
         std::cerr << "CountTable rpc failed!" << std::endl;
@@ -242,10 +242,10 @@ GrpcClient::ShowTables(milvus::grpc::TableNameList& table_name_list) {
 }
 
 Status
-GrpcClient::ShowTableInfo(grpc::TableName& table_name, grpc::TableInfo& table_info) {
+GrpcClient::ShowTableInfo(grpc::TableName& collection_name, grpc::TableInfo& table_info) {
     ClientContext context;
     ::milvus::grpc::Command command;
-    ::grpc::Status grpc_status = stub_->ShowTableInfo(&context, table_name, &table_info);
+    ::grpc::Status grpc_status = stub_->ShowTableInfo(&context, collection_name, &table_info);
 
     if (!grpc_status.ok()) {
         std::cerr << "ShowTableInfo gRPC failed!" << std::endl;
@@ -284,10 +284,10 @@ GrpcClient::Cmd(const std::string& cmd, std::string& result) {
 }
 
 Status
-GrpcClient::PreloadTable(milvus::grpc::TableName& table_name) {
+GrpcClient::PreloadTable(milvus::grpc::TableName& collection_name) {
     ClientContext context;
     ::milvus::grpc::Status response;
-    ::grpc::Status grpc_status = stub_->PreloadTable(&context, table_name, &response);
+    ::grpc::Status grpc_status = stub_->PreloadTable(&context, collection_name, &response);
 
     if (!grpc_status.ok()) {
         std::cerr << "PreloadTable gRPC failed!" << std::endl;
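Every GrpcClient method in this file follows the same call shape, which is why the rename stays mechanical: build a ClientContext, invoke the generated stub, check the transport-level ::grpc::Status, and fold failures into the SDK's Status. Condensed from the hunks above (the tail is hedged: the real methods also decode the application-level response before returning):

    // Pattern shared by HasTable/DropTable/CountTable/PreloadTable/... above.
    Status
    GrpcClient::DropTable(const ::milvus::grpc::TableName& collection_name) {
        ClientContext context;               // per-call gRPC context
        ::milvus::grpc::Status response;     // application-level status payload
        ::grpc::Status grpc_status = stub_->DropTable(&context, collection_name, &response);

        if (!grpc_status.ok()) {
            std::cerr << "DropTable gRPC failed!" << std::endl;
            return Status(StatusCode::RPCFailed, grpc_status.error_message());  // assumed mapping
        }
        // (the real method also inspects response.error_code() here)
        return Status::OK();
    }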
@@ -320,9 +320,9 @@ GrpcClient::DeleteByID(grpc::DeleteByIDParam& delete_by_id_param) {
 }
 
 Status
-GrpcClient::DescribeIndex(grpc::TableName& table_name, grpc::IndexParam& index_param) {
+GrpcClient::DescribeIndex(grpc::TableName& collection_name, grpc::IndexParam& index_param) {
     ClientContext context;
-    ::grpc::Status grpc_status = stub_->DescribeIndex(&context, table_name, &index_param);
+    ::grpc::Status grpc_status = stub_->DescribeIndex(&context, collection_name, &index_param);
 
     if (!grpc_status.ok()) {
         std::cerr << "DescribeIndex rpc failed!" << std::endl;
@@ -337,10 +337,10 @@ GrpcClient::DescribeIndex(grpc::TableName& table_name, grpc::IndexParam& index_param) {
 }
 
 Status
-GrpcClient::DropIndex(grpc::TableName& table_name) {
+GrpcClient::DropIndex(grpc::TableName& collection_name) {
     ClientContext context;
     ::milvus::grpc::Status response;
-    ::grpc::Status grpc_status = stub_->DropIndex(&context, table_name, &response);
+    ::grpc::Status grpc_status = stub_->DropIndex(&context, collection_name, &response);
 
     if (!grpc_status.ok()) {
         std::cerr << "DropIndex gRPC failed!" << std::endl;
@@ -373,9 +373,9 @@ GrpcClient::CreatePartition(const grpc::PartitionParam& partition_param) {
 }
 
 Status
-GrpcClient::ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const {
+GrpcClient::ShowPartitions(const grpc::TableName& collection_name, grpc::PartitionList& partition_array) const {
     ClientContext context;
-    ::grpc::Status grpc_status = stub_->ShowPartitions(&context, table_name, &partition_array);
+    ::grpc::Status grpc_status = stub_->ShowPartitions(&context, collection_name, &partition_array);
 
     if (!grpc_status.ok()) {
         std::cerr << "ShowPartitions gRPC failed!" << std::endl;
@@ -408,12 +408,12 @@ GrpcClient::DropPartition(const ::milvus::grpc::PartitionParam& partition_param) {
 }
 
 Status
-GrpcClient::Flush(const std::string& table_name) {
+GrpcClient::Flush(const std::string& collection_name) {
     ClientContext context;
 
     ::milvus::grpc::FlushParam param;
-    if (!table_name.empty()) {
-        param.add_table_name_array(table_name);
+    if (!collection_name.empty()) {
+        param.add_table_name_array(collection_name);
     }
 
     ::milvus::grpc::Status response;
@@ -432,10 +432,10 @@ GrpcClient::Flush(const std::string& table_name) {
 }
 
 Status
-GrpcClient::Compact(milvus::grpc::TableName& table_name) {
+GrpcClient::Compact(milvus::grpc::TableName& collection_name) {
     ClientContext context;
     ::milvus::grpc::Status response;
-    ::grpc::Status grpc_status = stub_->Compact(&context, table_name, &response);
+    ::grpc::Status grpc_status = stub_->Compact(&context, collection_name, &response);
 
     if (!grpc_status.ok()) {
         std::cerr << "Compact gRPC failed!" << std::endl;