diff --git a/CHANGELOG.md b/CHANGELOG.md
index 063814d567..b6b4119fda 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -59,6 +59,7 @@ Please mark all change in change log and use the issue from GitHub
 - \#1590 Server down caused by failure to write file during concurrent mixed operations
 - \#1598 Server down during mixed operations
 - \#1601 External link bug in HTTP doc
+- \#1609 Refine Compact function
 
 ## Feature
 - \#216 Add CLI to get server info
diff --git a/core/src/db/DBImpl.cpp b/core/src/db/DBImpl.cpp
index f83c0d2b5a..9a59281d83 100644
--- a/core/src/db/DBImpl.cpp
+++ b/core/src/db/DBImpl.cpp
@@ -671,26 +671,6 @@ DBImpl::Compact(const std::string& table_id) {
 
     ENGINE_LOG_DEBUG << "Compacting table: " << table_id;
 
-    /*
-    // Save table index
-    TableIndex table_index;
-    status = DescribeIndex(table_id, table_index);
-    if (!status.ok()) {
-        return status;
-    }
-
-    // Drop all index
-    status = DropIndex(table_id);
-    if (!status.ok()) {
-        return status;
-    }
-
-    // Then update table index to the previous index
-    status = UpdateTableIndexRecursively(table_id, table_index);
-    if (!status.ok()) {
-        return status;
-    }
-    */
     // Get files to compact from meta.
     std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX,
                                 meta::TableFileSchema::FILE_TYPE::BACKUP};
@@ -706,7 +686,6 @@ DBImpl::Compact(const std::string& table_id) {
 
     OngoingFileChecker::GetInstance().MarkOngoingFiles(files_to_compact);
 
-    meta::TableFilesSchema files_to_update;
     Status compact_status;
     for (auto& file : files_to_compact) {
         // Check if the segment needs compacting
@@ -719,52 +698,41 @@ DBImpl::Compact(const std::string& table_id) {
         if (!status.ok()) {
             std::string msg = "Failed to load deleted_docs from " + segment_dir;
             ENGINE_LOG_ERROR << msg;
-            return Status(DB_ERROR, msg);
+            OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
+            continue;  // skip this file and try compact next one
         }
 
+        meta::TableFilesSchema files_to_update;
         if (deleted_docs->GetSize() != 0) {
             compact_status = CompactFile(table_id, file, files_to_update);
 
             if (!compact_status.ok()) {
                 ENGINE_LOG_ERROR << "Compact failed for segment " << file.segment_id_ << ": "
                                  << compact_status.message();
-                break;
+                OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
+                continue;  // skip this file and try compact next one
             }
         } else {
+            OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
             ENGINE_LOG_DEBUG << "Segment " << file.segment_id_ << " has no deleted data. No need to compact";
+            continue;  // skip this file and try compact next one
+        }
+
+        ENGINE_LOG_DEBUG << "Updating meta after compaction...";
+        status = meta_ptr_->UpdateTableFiles(files_to_update);
+        if (!status.ok()) {
+            compact_status = status;
+            break;  // meta error, could not go on
         }
     }
 
+    OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_compact);
+
     if (compact_status.ok()) {
         ENGINE_LOG_DEBUG << "Finished compacting table: " << table_id;
     }
 
-    ENGINE_LOG_DEBUG << "Updating meta after compaction...";
-
-    /*
-    // Drop index again, in case some files were in the index building process during compacting
-    status = DropIndex(table_id);
-    if (!status.ok()) {
-        return status;
-    }
-
-    // Update index
-    status = UpdateTableIndexRecursively(table_id, table_index);
-    if (!status.ok()) {
-        return status;
-    }
-    */
-
-    status = meta_ptr_->UpdateTableFiles(files_to_update);
-    if (!status.ok()) {
-        return status;
-    }
-
-    OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_compact);
-
-    ENGINE_LOG_DEBUG << "Finished updating meta after compaction";
-
-    return status;
+    return compact_status;
 }
 
 Status