Mirror of https://gitee.com/milvus-io/milvus.git, synced 2026-01-07 19:31:51 +08:00

issue: #44358

Implement a complete snapshot management system, including creation, deletion, listing, description, and restoration capabilities across all system components.

Key features:
- Create snapshots for entire collections
- Drop snapshots by name with proper cleanup
- List snapshots with collection filtering
- Describe snapshot details and metadata

Components added/modified:
- Client SDK with full snapshot API support and options
- DataCoord snapshot service with metadata management
- Proxy layer with task-based snapshot operations
- Protocol buffer definitions for snapshot RPCs
- Comprehensive unit tests with the mockey framework
- Integration tests for end-to-end validation

Technical implementation:
- Snapshot metadata storage in etcd with proper indexing
- File-based snapshot data persistence in object storage
- Garbage collection integration for snapshot cleanup
- Error handling and validation across all operations
- Thread-safe operations with proper locking mechanisms

<!-- This is an auto-generated comment: release notes by coderabbit.ai -->
- Core invariant/assumption: snapshots are immutable point-in-time captures identified by (collection, snapshot name/ID); etcd snapshot metadata is authoritative for the lifecycle (PENDING → COMMITTED → DELETING), and per-segment manifests live in object storage (Avro / StorageV2). GC and restore logic must see the snapshot reference index loaded (snapshotMeta.IsRefIndexLoaded) before reclaiming or relying on segment/index files.
- New capability added: a full end-to-end snapshot subsystem — client SDK APIs (Create/Drop/List/Describe/Restore plus restore job queries), DataCoord SnapshotWriter/Reader (Avro + StorageV2 manifests), snapshotMeta in meta, SnapshotManager orchestration (create/drop/describe/list/restore), copy-segment restore tasks/inspector/checker, proxy & RPC surface, GC integration, and docs/tests — enabling point-in-time collection snapshots persisted to object storage and restorations orchestrated across components.
- Logic removed/simplified and why: duplicated recursive compaction/delta-log traversal and ad-hoc lookup code were consolidated behind two focused APIs/owners (Handler.GetDeltaLogFromCompactTo for delta traversal and SnapshotManager/SnapshotReader for snapshot I/O). MixCoord/coordinator broker paths were converted to thin RPC proxies. This eliminates multiple implementations of the same traversal/lookup, reducing divergence and simplifying responsibility boundaries.
- Why this does NOT introduce data loss or regressions: snapshot create/drop use explicit two-phase semantics (PENDING → COMMIT/DELETING), with SnapshotWriter writing manifests and metadata before commit; GC uses snapshotRefIndex guards and IsRefIndexLoaded/GetSnapshotBySegment/GetSnapshotByIndex checks to avoid removing referenced files; the restore flow pre-allocates job IDs, validates resources (partitions/indexes), performs rollback on failure (rollbackRestoreSnapshot), and converts/updates segment/index metadata only after successful copy tasks. Extensive unit and integration tests exercise pending/deleting/GC/restore/error paths to ensure idempotence and protection against premature deletion.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->

---------

Signed-off-by: Wei Liu <wei.liu@zilliz.com>

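The GC guard described in the release notes (reclaim segment or index files only after the snapshot reference index is loaded, and only when no snapshot still references them) can be sketched as follows. This is a minimal illustration, not the actual implementation: the snapshotMetaView interface and the canReclaimSegment helper are hypothetical; only the method names IsRefIndexLoaded and GetSnapshotBySegment come from the notes above, and their signatures are assumed.

// snapshotMetaView is a hypothetical read-only view over snapshot metadata.
type snapshotMetaView interface {
	IsRefIndexLoaded() bool
	GetSnapshotBySegment(segmentID int64) []int64 // IDs of snapshots referencing the segment
}

// canReclaimSegment sketches the guard: never reclaim before the reference index is
// loaded, and only reclaim segments that no snapshot references.
func canReclaimSegment(meta snapshotMetaView, segmentID int64) bool {
	if !meta.IsRefIndexLoaded() {
		return false
	}
	return len(meta.GetSnapshotBySegment(segmentID)) == 0
}
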
143 lines
4.2 KiB
Go
// Licensed to the LF AI & Data foundation under one
|
|
// or more contributor license agreements. See the NOTICE file
|
|
// distributed with this work for additional information
|
|
// regarding copyright ownership. The ASF licenses this file
|
|
// to you under the Apache License, Version 2.0 (the
|
|
// "License"); you may not use this file except in compliance
|
|
// with the License. You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package datacoord
|
|
|
|
import (
|
|
"time"
|
|
|
|
"go.uber.org/zap"
|
|
"google.golang.org/protobuf/proto"
|
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
|
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
|
"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
|
|
"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
|
|
)
|
|
|
|
// CopySegmentJobFilter is a predicate used to select copy segment jobs.
type CopySegmentJobFilter func(job CopySegmentJob) bool

// WithCopyJobCollectionID matches jobs belonging to the given collection.
func WithCopyJobCollectionID(collectionID int64) CopySegmentJobFilter {
	return func(job CopySegmentJob) bool {
		return job.GetCollectionId() == collectionID
	}
}

// WithCopyJobStates matches jobs whose state is any of the given states.
func WithCopyJobStates(states ...datapb.CopySegmentJobState) CopySegmentJobFilter {
	return func(job CopySegmentJob) bool {
		for _, state := range states {
			if job.GetState() == state {
				return true
			}
		}
		return false
	}
}

// WithoutCopyJobStates matches jobs whose state is none of the given states.
func WithoutCopyJobStates(states ...datapb.CopySegmentJobState) CopySegmentJobFilter {
	return func(job CopySegmentJob) bool {
		for _, state := range states {
			if job.GetState() == state {
				return false
			}
		}
		return true
	}
}

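// Illustrative sketch (hypothetical helper, not part of the original file): the
// filters above are predicates meant to be ANDed together when scanning jobs.
func filterCopySegmentJobs(jobs []CopySegmentJob, filters ...CopySegmentJobFilter) []CopySegmentJob {
	selected := make([]CopySegmentJob, 0, len(jobs))
	for _, job := range jobs {
		matched := true
		for _, filter := range filters {
			if !filter(job) {
				matched = false
				break
			}
		}
		if matched {
			selected = append(selected, job)
		}
	}
	return selected
}

// For example, all non-terminal jobs of one collection:
//
//	active := filterCopySegmentJobs(jobs,
//		WithCopyJobCollectionID(collectionID),
//		WithoutCopyJobStates(
//			datapb.CopySegmentJobState_CopySegmentJobCompleted,
//			datapb.CopySegmentJobState_CopySegmentJobFailed,
//		))
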
// UpdateCopySegmentJobAction mutates a copy segment job in place.
type UpdateCopySegmentJobAction func(job CopySegmentJob)

// UpdateCopyJobState sets the job state. When the job reaches a terminal state
// (completed or failed), it also stamps the cleanup timestamp derived from the
// copy segment task retention, so the job can be garbage collected later.
func UpdateCopyJobState(state datapb.CopySegmentJobState) UpdateCopySegmentJobAction {
	return func(job CopySegmentJob) {
		job.(*copySegmentJob).CopySegmentJob.State = state
		if state == datapb.CopySegmentJobState_CopySegmentJobCompleted ||
			state == datapb.CopySegmentJobState_CopySegmentJobFailed {
			// Set cleanup ts based on copy segment task retention
			dur := Params.DataCoordCfg.CopySegmentTaskRetention.GetAsDuration(time.Second)
			cleanupTime := time.Now().Add(dur)
			cleanupTs := tsoutil.ComposeTSByTime(cleanupTime, 0)
			job.(*copySegmentJob).CopySegmentJob.CleanupTs = cleanupTs
			log.Info("set copy segment job cleanup ts",
				zap.Int64("jobID", job.GetJobId()),
				zap.Time("cleanupTime", cleanupTime),
				zap.Uint64("cleanupTs", cleanupTs))
		}
	}
}

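// Illustrative sketch (hypothetical helper, not part of the original file): once
// UpdateCopyJobState above stamps CleanupTs on a completed or failed job, a cleanup
// pass can decide whether the retention window has elapsed. Timestamps composed by
// tsoutil.ComposeTSByTime carry the physical time in their high bits, so comparing
// against a freshly composed "now" timestamp is sufficient.
func copyJobCleanupDue(job CopySegmentJob) bool {
	cleanupTs := job.GetCleanupTs()
	if cleanupTs == 0 {
		// CleanupTs is only set when the job reaches a terminal state.
		return false
	}
	return tsoutil.ComposeTSByTime(time.Now(), 0) >= cleanupTs
}
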
// UpdateCopyJobReason records the reason string on the job.
func UpdateCopyJobReason(reason string) UpdateCopySegmentJobAction {
	return func(job CopySegmentJob) {
		job.(*copySegmentJob).CopySegmentJob.Reason = reason
	}
}

// UpdateCopyJobProgress updates the copied and total segment counters.
func UpdateCopyJobProgress(copied, total int64) UpdateCopySegmentJobAction {
	return func(job CopySegmentJob) {
		job.(*copySegmentJob).CopySegmentJob.CopiedSegments = copied
		job.(*copySegmentJob).CopySegmentJob.TotalSegments = total
	}
}

// UpdateCopyJobCompleteTs sets the job's completion timestamp.
func UpdateCopyJobCompleteTs(completeTs uint64) UpdateCopySegmentJobAction {
	return func(job CopySegmentJob) {
		job.(*copySegmentJob).CopySegmentJob.CompleteTs = completeTs
	}
}

// UpdateCopyJobTotalRows sets the total number of rows covered by the job.
func UpdateCopyJobTotalRows(totalRows int64) UpdateCopySegmentJobAction {
	return func(job CopySegmentJob) {
		job.(*copySegmentJob).CopySegmentJob.TotalRows = totalRows
	}
}

// CopySegmentJob is the read-only view of a copy segment job used inside DataCoord,
// plus helpers for time recording and cloning.
type CopySegmentJob interface {
	GetJobId() int64
	GetDbId() int64
	GetCollectionId() int64
	GetCollectionName() string
	GetState() datapb.CopySegmentJobState
	GetReason() string
	GetIdMappings() []*datapb.CopySegmentIDMapping // Lightweight ID mappings
	GetOptions() []*commonpb.KeyValuePair
	GetTimeoutTs() uint64
	GetCleanupTs() uint64
	GetStartTs() uint64
	GetCompleteTs() uint64
	GetTotalSegments() int64
	GetCopiedSegments() int64
	GetTotalRows() int64
	GetSnapshotName() string
	GetTR() *timerecord.TimeRecorder
	Clone() CopySegmentJob
}

// copySegmentJob wraps the persisted datapb.CopySegmentJob together with an
// in-memory time recorder.
type copySegmentJob struct {
	*datapb.CopySegmentJob
	tr *timerecord.TimeRecorder
}

func (j *copySegmentJob) GetTR() *timerecord.TimeRecorder {
	return j.tr
}

// Clone deep-copies the underlying proto message; the time recorder is shared with
// the original job.
func (j *copySegmentJob) Clone() CopySegmentJob {
	return &copySegmentJob{
		CopySegmentJob: proto.Clone(j.CopySegmentJob).(*datapb.CopySegmentJob),
		tr:             j.tr,
	}
}

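// Illustrative sketch (hypothetical helper, not part of the original file): updates
// are expressed as UpdateCopySegmentJobAction values applied to a Clone, so the
// original job stays untouched until the mutated copy is persisted. The saveJob
// callback stands in for whatever metadata persistence the caller uses.
func applyCopyJobActions(job CopySegmentJob, saveJob func(CopySegmentJob) error, actions ...UpdateCopySegmentJobAction) (CopySegmentJob, error) {
	cloned := job.Clone() // deep-copies the proto payload; the TimeRecorder is shared
	for _, action := range actions {
		action(cloned)
	}
	if err := saveJob(cloned); err != nil {
		return nil, err
	}
	return cloned, nil
}

// For example, marking a job completed with its final progress:
//
//	updated, err := applyCopyJobActions(job, saveJob,
//		UpdateCopyJobState(datapb.CopySegmentJobState_CopySegmentJobCompleted),
//		UpdateCopyJobProgress(total, total),
//		UpdateCopyJobCompleteTs(tsoutil.ComposeTSByTime(time.Now(), 0)),
//	)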