issue: #44358

Implement a complete snapshot management system, covering creation, deletion, listing, description, and restoration across all system components.

Key features:
- Create snapshots for entire collections
- Drop snapshots by name with proper cleanup
- List snapshots with collection filtering
- Describe snapshot details and metadata

Components added/modified:
- Client SDK with full snapshot API support and options
- DataCoord snapshot service with metadata management
- Proxy layer with task-based snapshot operations
- Protocol buffer definitions for snapshot RPCs
- Comprehensive unit tests with the mockey framework
- Integration tests for end-to-end validation

Technical implementation:
- Snapshot metadata storage in etcd with proper indexing
- File-based snapshot data persistence in object storage
- Garbage collection integration for snapshot cleanup
- Error handling and validation across all operations
- Thread-safe operations with proper locking mechanisms

Release notes (auto-generated by coderabbit.ai):
- Core invariant/assumption: snapshots are immutable point-in-time captures identified by (collection, snapshot name/ID). The etcd snapshot metadata is authoritative for the lifecycle (PENDING → COMMITTED → DELETING), and per-segment manifests live in object storage (Avro / StorageV2). GC and restore logic must see snapshotRefIndex loaded (snapshotMeta.IsRefIndexLoaded) before reclaiming or relying on segment/index files.
- New capability added: a full end-to-end snapshot subsystem: client SDK APIs (Create/Drop/List/Describe/Restore plus restore-job queries), DataCoord SnapshotWriter/Reader (Avro + StorageV2 manifests), snapshotMeta in meta, SnapshotManager orchestration (create/drop/describe/list/restore), copy-segment restore tasks/inspector/checker, the proxy and RPC surface, GC integration, and docs/tests. This enables point-in-time collection snapshots persisted to object storage, with restorations orchestrated across components.
- Logic removed/simplified, and why: duplicated recursive compaction/delta-log traversal and ad-hoc lookup code were consolidated behind two focused APIs/owners (Handler.GetDeltaLogFromCompactTo for delta traversal; SnapshotManager/SnapshotReader for snapshot I/O). MixCoord/coordinator broker paths were converted to thin RPC proxies. This eliminates multiple implementations of the same traversal/lookup, reducing divergence and simplifying responsibility boundaries.
- Why this does NOT introduce data loss or regressions: snapshot create/drop use explicit two-phase semantics (PENDING → COMMIT/DELETING), with SnapshotWriter writing manifests and metadata before commit. GC uses snapshotRefIndex guards and IsRefIndexLoaded/GetSnapshotBySegment/GetSnapshotByIndex checks to avoid removing referenced files. The restore flow pre-allocates job IDs, validates resources (partitions/indexes), rolls back on failure (rollbackRestoreSnapshot), and converts/updates segment/index metadata only after copy tasks succeed. Extensive unit and integration tests exercise the pending/deleting/GC/restore/error paths to ensure idempotence and protection against premature deletion.

---------

Signed-off-by: Wei Liu <wei.liu@zilliz.com>
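To make the two-phase lifecycle described in the release notes concrete, here is a minimal sketch of the state transitions it implies. The state names follow the notes above; the Go identifiers and the transition helper are illustrative assumptions, not the actual DataCoord types.

```go
// Sketch of the snapshot lifecycle from the release notes (illustrative;
// these identifiers are assumed, not the real DataCoord types).
type SnapshotState int

const (
	SnapshotPending   SnapshotState = iota // manifests/metadata written, not yet visible
	SnapshotCommitted                      // authoritative point-in-time capture; GC-protected
	SnapshotDeleting                       // staged for cleanup; GC may reclaim manifests
)

// validTransition encodes the two-phase semantics: a snapshot becomes
// visible only via PENDING → COMMITTED, and deletion is itself staged
// via COMMITTED → DELETING so GC never races an in-flight create.
func validTransition(from, to SnapshotState) bool {
	return (from == SnapshotPending && to == SnapshotCommitted) ||
		(from == SnapshotCommitted && to == SnapshotDeleting)
}
```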
210 lines
6.6 KiB
Go
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"io"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"

	"github.com/milvus-io/milvus/pkg/v2/objectstorage"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
)

type AzureObjectStorage struct {
	*service.Client
}

func newAzureObjectStorageWithConfig(ctx context.Context, c *objectstorage.Config) (*AzureObjectStorage, error) {
	client, err := objectstorage.NewAzureObjectStorageClient(ctx, c)
	if err != nil {
		return nil, err
	}
	return &AzureObjectStorage{Client: client}, nil
}

// BlobReader is implemented because Azure's stream body does not implement
// the ReadAt and Seek interfaces.
// BlobReader is not concurrency safe.
type BlobReader struct {
	client          *blockblob.Client
	position        int64         // current read offset within the blob
	body            io.ReadCloser // lazily opened download stream starting at position
	contentLength   int64         // populated by the first successful Read
	needResetStream bool          // true when the stream must be (re)opened at position
}

func NewBlobReader(client *blockblob.Client, offset int64) (*BlobReader, error) {
	return &BlobReader{client: client, position: offset, needResetStream: true}, nil
}
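
// Usage sketch (illustrative only; the bucket/object names and the service
// client variable svc are assumptions, not part of this file): a BlobReader
// satisfies io.Reader, io.ReaderAt, io.Seeker, and io.Closer, so it can be
// handed to code that expects a seekable, file-like reader.
//
//	bb := svc.NewContainerClient("my-bucket").NewBlockBlobClient("path/to/object")
//	r, _ := NewBlobReader(bb, 0)
//	defer r.Close()
//	var _ io.ReadSeekCloser = r // compile-time interface checks
//	var _ io.ReaderAt = r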

func (b *BlobReader) Read(p []byte) (n int, err error) {
	ctx := context.TODO()

	// Lazily (re)open the download stream at the current position. This is
	// needed after construction and after every Seek.
	if b.needResetStream {
		opts := &azblob.DownloadStreamOptions{
			Range: blob.HTTPRange{
				Offset: b.position,
			},
		}
		object, err := b.client.DownloadStream(ctx, opts)
		if err != nil {
			return 0, err
		}
		b.body = object.Body
		b.contentLength = *object.ContentLength
	}

	n, err = b.body.Read(p)
	if err != nil {
		return n, err
	}
	b.position += int64(n)
	b.needResetStream = false
	return n, nil
}

func (b *BlobReader) Close() error {
	if b.body != nil {
		return b.body.Close()
	}
	return nil
}

// ReadAt issues an independent ranged download and does not disturb the
// sequential stream used by Read.
func (b *BlobReader) ReadAt(p []byte, off int64) (n int, err error) {
	httpRange := blob.HTTPRange{
		Offset: off,
		Count:  int64(len(p)),
	}
	object, err := b.client.DownloadStream(context.Background(), &blob.DownloadStreamOptions{
		Range: httpRange,
	})
	if err != nil {
		return 0, err
	}
	defer object.Body.Close()
	return io.ReadFull(object.Body, p)
}

// Seek records the new position; the underlying stream is re-opened lazily
// on the next Read. Blob properties are fetched so io.SeekEnd can be resolved.
func (b *BlobReader) Seek(offset int64, whence int) (int64, error) {
	props, err := b.client.GetProperties(context.Background(), &blob.GetPropertiesOptions{})
	if err != nil {
		return 0, err
	}
	size := *props.ContentLength

	var newOffset int64
	switch whence {
	case io.SeekStart:
		newOffset = offset
	case io.SeekCurrent:
		newOffset = b.position + offset
	case io.SeekEnd:
		newOffset = size + offset
	default:
		return 0, merr.WrapErrIoFailedReason("invalid whence")
	}

	b.position = newOffset
	b.needResetStream = true
	return newOffset, nil
}

// Size returns the content length captured by the first Read; it is zero
// before any Read has been issued.
func (b *BlobReader) Size() (int64, error) {
	return b.contentLength, nil
}

func (aos *AzureObjectStorage) GetObject(ctx context.Context, bucketName, objectName string, offset int64, size int64) (FileReader, error) {
	// The size argument is unused; offset seeds the reader's initial position.
	return NewBlobReader(aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName), offset)
}

func (aos *AzureObjectStorage) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64) error {
	_, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).UploadStream(ctx, reader, &azblob.UploadStreamOptions{})
	return checkObjectStorageError(objectName, err)
}

func (aos *AzureObjectStorage) StatObject(ctx context.Context, bucketName, objectName string) (int64, error) {
	info, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).GetProperties(ctx, &blob.GetPropertiesOptions{})
	if err != nil {
		return 0, checkObjectStorageError(objectName, err)
	}
	return *info.ContentLength, nil
}

func (aos *AzureObjectStorage) WalkWithObjects(ctx context.Context, bucketName string, prefix string, recursive bool, walkFunc ChunkObjectWalkFunc) error {
	if recursive {
		// A flat listing enumerates every blob under the prefix.
		pager := aos.Client.NewContainerClient(bucketName).NewListBlobsFlatPager(&azblob.ListBlobsFlatOptions{
			Prefix: &prefix,
		})
		for pager.More() {
			pageResp, err := pager.NextPage(ctx)
			if err != nil {
				return err
			}
			for _, blobItem := range pageResp.Segment.BlobItems {
				if !walkFunc(&ChunkObjectInfo{FilePath: *blobItem.Name, ModifyTime: *blobItem.Properties.LastModified}) {
					return nil
				}
			}
		}
	} else {
		// A hierarchy listing returns only the current "directory" level:
		// blobs plus virtual-directory prefixes delimited by "/".
		pager := aos.Client.NewContainerClient(bucketName).NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
			Prefix: &prefix,
		})
		for pager.More() {
			pageResp, err := pager.NextPage(ctx)
			if err != nil {
				return err
			}

			for _, blobItem := range pageResp.Segment.BlobItems {
				if !walkFunc(&ChunkObjectInfo{FilePath: *blobItem.Name, ModifyTime: *blobItem.Properties.LastModified}) {
					return nil
				}
			}
			// Virtual-directory prefixes carry no modification time, so the
			// current time is reported instead.
			for _, blobPrefix := range pageResp.Segment.BlobPrefixes {
				if !walkFunc(&ChunkObjectInfo{FilePath: *blobPrefix.Name, ModifyTime: time.Now()}) {
					return nil
				}
			}
		}
	}
	return nil
}
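
// Usage sketch (illustrative; the bucket name, prefix, and cap below are
// assumptions): the walk stops as soon as walkFunc returns false.
//
//	var paths []string
//	err := aos.WalkWithObjects(ctx, "my-bucket", "files/", true, func(info *ChunkObjectInfo) bool {
//		paths = append(paths, info.FilePath)
//		return len(paths) < 1000 // stop early after collecting 1000 paths
//	})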

func (aos *AzureObjectStorage) RemoveObject(ctx context.Context, bucketName, objectName string) error {
	_, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).Delete(ctx, &blob.DeleteOptions{})
	return checkObjectStorageError(objectName, err)
}

func (aos *AzureObjectStorage) CopyObject(ctx context.Context, bucketName, srcObjectName, dstObjectName string) error {
	containerClient := aos.Client.NewContainerClient(bucketName)
	srcBlobClient := containerClient.NewBlockBlobClient(srcObjectName)
	dstBlobClient := containerClient.NewBlockBlobClient(dstObjectName)

	// Get the source blob URL.
	srcURL := srcBlobClient.URL()

	// Start the copy. StartCopyFromURL initiates a server-side copy that may
	// complete asynchronously; this call returns once the copy is accepted.
	_, err := dstBlobClient.StartCopyFromURL(ctx, srcURL, &blob.StartCopyFromURLOptions{})
	return checkObjectStorageError(dstObjectName, err)
}