Related #44956

This commit integrates the Storage V2 FFI (Foreign Function Interface) throughout the Milvus codebase, enabling unified storage access through the Loon FFI layer. It is a significant step towards standardizing storage operations across different storage versions.

**1. Configuration Support**

- **configs/milvus.yaml**: Added the `useLoonFFI` configuration flag under the `common.storage.file.splitByAvgSize` section
  - Allows a runtime toggle between traditional binlog readers and the new FFI-based manifest readers
  - Default: `false` (maintains backward compatibility)

**2. Core FFI Infrastructure**

Enhanced utilities (internal/core/src/storage/loon_ffi/util.cpp/h):

- **ToCStorageConfig()**: Converts Go's `StorageConfig` to C's `CStorageConfig` struct for FFI calls
- **GetManifest()**: Parses the manifest JSON and retrieves the latest column groups via FFI
  - Accepts a manifest path with `base_path` and `ver` fields
  - Calls the `get_latest_column_groups()` FFI function
  - Returns the column group information as a string
  - Comprehensive error handling for JSON parsing and FFI errors

**3. Dependency Updates**

- **internal/core/thirdparty/milvus-storage/CMakeLists.txt**:
  - Updated the milvus-storage version from `0883026` to `302143c`
  - Ensures compatibility with the latest FFI interfaces

**4. Data Coordinator Changes**

All compaction task builders now include the manifest path in segment binlogs:

- **compaction_task_clustering.go**: Added `Manifest: segInfo.GetManifestPath()` to segment binlogs
- **compaction_task_l0.go**: Added the manifest path to both L0 segment selection and compaction plan building
- **compaction_task_mix.go**: Added the manifest path to mixed compaction segment binlogs
- **meta.go**: Updated metadata completion logic:
  - `completeClusterCompactionMutation()`: Set `ManifestPath` in the new segment info
  - `completeMixCompactionMutation()`: Preserve the manifest path in compacted segments
  - `completeSortCompactionMutation()`: Include the manifest path in sorted segments

**5. Data Node Compactor Enhancements**

All compactors updated to support dual-mode reading (binlog vs. manifest).

**6. Flush & Sync Manager Updates**

Pack Writer V2 (pack_writer_v2.go):

- **BulkPackWriterV2.Write()**: Extended the return signature to include `manifest string`
- Implementation (see the sketch after this overview):
  - Generate the manifest path: `path.Join(pack.segmentID, "manifest.json")`
  - Write the packed data using the FFI-based writer
  - Return the manifest path along with binlogs, deltas, and stats

Task handling (task.go):

- Updated all sync task result handling to accommodate the new manifest return value
- Ensured backward compatibility for callers not using the manifest

**7. Go Storage Layer Integration**

New interfaces and implementations:

- **record_reader.go**: Interface for unified record reading across storage versions
- **record_writer.go**: Interface for unified record writing across storage versions
- **binlog_record_writer.go**: Concrete implementation for traditional binlog-based writing

Enhanced schema support (schema.go, schema_test.go):

- Schema conversion utilities to support FFI-based storage operations
- Ensures proper Arrow schema mapping for V2 storage

Serialization updates:

- **serde.go, serde_events.go, serde_events_v2.go**: Updated to work with the new reader/writer interfaces
- Test files updated to validate dual-mode serialization
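As a concrete illustration of the pack-writer change in section 6, here is a minimal Go sketch of the manifest-path convention. The helper name `buildManifestPath` and the `int64` formatting are assumptions for illustration; only the `path.Join(<segmentID>, "manifest.json")` convention comes from the change itself.

```go
package main

import (
	"fmt"
	"path"
	"strconv"
)

// buildManifestPath mirrors the convention described for BulkPackWriterV2.Write:
// the manifest lives at path.Join(<segmentID>, "manifest.json").
// The helper name and the int64-to-string formatting are illustrative assumptions.
func buildManifestPath(segmentID int64) string {
	return path.Join(strconv.FormatInt(segmentID, 10), "manifest.json")
}

func main() {
	// e.g. segment 449 -> "449/manifest.json"
	fmt.Println(buildManifestPath(449))
}
```

In the actual `BulkPackWriterV2.Write()`, this path is returned together with the binlogs, deltas, and stats so that callers can record it as the segment's `ManifestPath`.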
**8. Storage V2 Packed Format**

FFI common (storagev2/packed/ffi_common.go):

- Common FFI utilities and type conversions for the packed storage format

Packed writer FFI (storagev2/packed/packed_writer_ffi.go):

- FFI-based implementation of the packed writer
- Integrates with the Loon storage layer for efficient columnar writes

Packed reader FFI (storagev2/packed/packed_reader_ffi.go):

- Already existed; now complemented by the writer implementation

**9. Protocol Buffer Updates**

data_coord.proto & datapb/data_coord.pb.go:

- Added a `manifest` field to compaction segment messages
- Enables passing manifest metadata through the compaction pipeline

worker.proto & workerpb/worker.pb.go:

- Added a compaction parameter for the `useLoonFFI` flag
- Allows workers to receive the FFI configuration from the coordinator

**10. Parameter Configuration**

component_param.go:

- Added the `UseLoonFFI` parameter to the compaction configuration
- Reads from the `common.storage.file.useLoonFFI` config path
- Default: `false` for safe rollout

**11. Test Updates**

- **clustering_compactor_storage_v2_test.go**: Updated signatures to handle the manifest return value
- **mix_compactor_storage_v2_test.go**: Updated test helpers for manifest support
- **namespace_compactor_test.go**: Adjusted writer calls to expect a manifest
- **pack_writer_v2_test.go**: Validated manifest generation in pack writing

This integration follows a **dual-mode approach** (a sketch follows below):

1. **Legacy path**: traditional binlog-based reading/writing (when `useLoonFFI=false` or no manifest is present)
2. **FFI path**: manifest-based reading/writing through the Loon FFI (when `useLoonFFI=true` and a manifest exists)

---------

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
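Before the file listing below, here is a minimal sketch of the dual-mode dispatch described in the commit message. `Segment`, `ManifestPath`, and `readerMode` are illustrative stand-ins for the per-compactor wiring, not actual Milvus types; only the decision rule (FFI reading when `useLoonFFI` is enabled and a manifest exists) comes from the description above.

```go
package main

import "fmt"

// Segment is a stand-in for compaction segment metadata; only the
// ManifestPath field mirrors the new proto field added by this change.
type Segment struct {
	ID           int64
	ManifestPath string
}

// readerMode illustrates the dual-mode decision: the FFI/manifest path is
// used only when useLoonFFI is enabled AND the segment carries a manifest;
// otherwise the legacy binlog path is taken.
func readerMode(useLoonFFI bool, seg Segment) string {
	if useLoonFFI && seg.ManifestPath != "" {
		return "loon-ffi-manifest"
	}
	return "legacy-binlog"
}

func main() {
	fmt.Println(readerMode(true, Segment{ID: 1, ManifestPath: "1/manifest.json"})) // loon-ffi-manifest
	fmt.Println(readerMode(false, Segment{ID: 2}))                                 // legacy-binlog
}
```

Keeping the legacy branch as the fallback is what allows `useLoonFFI=false` to remain a safe default.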
// Copyright 2023 Zilliz
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package packed

/*
#cgo pkg-config: milvus_core

#include <stdlib.h>
#include "storage/loon_ffi/ffi_reader_c.h"
#include "arrow/c/abi.h"
#include "arrow/c/helpers.h"
*/
import "C"

import (
	"fmt"
	"io"
	"unsafe"

	"github.com/apache/arrow/go/v17/arrow"
	"github.com/apache/arrow/go/v17/arrow/cdata"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
)

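// NewFFIPackedReader creates an FFIPackedReader over the given manifest via the
// Loon FFI layer: it exports the Arrow schema through the C data interface,
// constructs the underlying packed reader with the supplied storage config and
// optional plugin (encryption) context, and imports the resulting
// ArrowArrayStream as an Arrow RecordReader. storageConfig must not be nil.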
func NewFFIPackedReader(manifest string, schema *arrow.Schema, neededColumns []string, bufferSize int64, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*FFIPackedReader, error) {
	cManifest := C.CString(manifest)
	defer C.free(unsafe.Pointer(cManifest))

	var cas cdata.CArrowSchema
	cdata.ExportArrowSchema(schema, &cas)
	cSchema := (*C.struct_ArrowSchema)(unsafe.Pointer(&cas))
	defer cdata.ReleaseCArrowSchema(&cas)

	var cPackedReader C.CFFIPackedReader
	var status C.CStatus

	var pluginContextPtr *C.CPluginContext
	if storagePluginContext != nil {
		ckey := C.CString(storagePluginContext.EncryptionKey)
		defer C.free(unsafe.Pointer(ckey))
		var pluginContext C.CPluginContext
		pluginContext.ez_id = C.int64_t(storagePluginContext.EncryptionZoneId)
		pluginContext.collection_id = C.int64_t(storagePluginContext.CollectionId)
		pluginContext.key = ckey
		pluginContextPtr = &pluginContext
	}

	if storageConfig != nil {
		cStorageConfig := C.CStorageConfig{
			address:                C.CString(storageConfig.GetAddress()),
			bucket_name:            C.CString(storageConfig.GetBucketName()),
			access_key_id:          C.CString(storageConfig.GetAccessKeyID()),
			access_key_value:       C.CString(storageConfig.GetSecretAccessKey()),
			root_path:              C.CString(storageConfig.GetRootPath()),
			storage_type:           C.CString(storageConfig.GetStorageType()),
			cloud_provider:         C.CString(storageConfig.GetCloudProvider()),
			iam_endpoint:           C.CString(storageConfig.GetIAMEndpoint()),
			log_level:              C.CString("Warn"), // TODO: use config once storage supports lower-case configuration
			useSSL:                 C.bool(storageConfig.GetUseSSL()),
			sslCACert:              C.CString(storageConfig.GetSslCACert()),
			useIAM:                 C.bool(storageConfig.GetUseIAM()),
			region:                 C.CString(storageConfig.GetRegion()),
			useVirtualHost:         C.bool(storageConfig.GetUseVirtualHost()),
			requestTimeoutMs:       C.int64_t(storageConfig.GetRequestTimeoutMs()),
			gcp_credential_json:    C.CString(storageConfig.GetGcpCredentialJSON()),
			use_custom_part_upload: true,
			max_connections:        C.uint32_t(storageConfig.GetMaxConnections()),
		}
		defer C.free(unsafe.Pointer(cStorageConfig.address))
		defer C.free(unsafe.Pointer(cStorageConfig.bucket_name))
		defer C.free(unsafe.Pointer(cStorageConfig.access_key_id))
		defer C.free(unsafe.Pointer(cStorageConfig.access_key_value))
		defer C.free(unsafe.Pointer(cStorageConfig.root_path))
		defer C.free(unsafe.Pointer(cStorageConfig.storage_type))
		defer C.free(unsafe.Pointer(cStorageConfig.cloud_provider))
		defer C.free(unsafe.Pointer(cStorageConfig.iam_endpoint))
		defer C.free(unsafe.Pointer(cStorageConfig.log_level))
		defer C.free(unsafe.Pointer(cStorageConfig.sslCACert))
		defer C.free(unsafe.Pointer(cStorageConfig.region))
		defer C.free(unsafe.Pointer(cStorageConfig.gcp_credential_json))

		cNeededColumn := make([]*C.char, len(neededColumns))
		for i, columnName := range neededColumns {
			cNeededColumn[i] = C.CString(columnName)
			defer C.free(unsafe.Pointer(cNeededColumn[i]))
		}
		cNeededColumnArray := (**C.char)(unsafe.Pointer(&cNeededColumn[0]))
		cNumColumns := C.int64_t(len(neededColumns))

		status = C.NewPackedFFIReaderWithManifest(cManifest, cSchema, cNeededColumnArray, cNumColumns, &cPackedReader, cStorageConfig, pluginContextPtr)
	} else {
		return nil, fmt.Errorf("storageConfig is required")
	}
	if err := ConsumeCStatusIntoError(&status); err != nil {
		return nil, err
	}

	// Get the ArrowArrayStream
	var cStream cdata.CArrowArrayStream
	status = C.GetFFIReaderStream(cPackedReader, C.int64_t(8196), (*C.struct_ArrowArrayStream)(unsafe.Pointer(&cStream)))
	if err := ConsumeCStatusIntoError(&status); err != nil {
		C.CloseFFIReader(cPackedReader)
		return nil, fmt.Errorf("failed to get reader stream: %w", err)
	}

	// Import the stream as a RecordReader
	recordReader, err := cdata.ImportCRecordReader(&cStream, schema)
	if err != nil {
		C.CloseFFIReader(cPackedReader)
		return nil, fmt.Errorf("failed to import record reader: %w", err)
	}

	return &FFIPackedReader{
		cPackedReader: cPackedReader,
		recordReader:  recordReader,
		schema:        schema,
	}, nil
}

// ReadNext reads the next record batch from the reader
func (r *FFIPackedReader) ReadNext() (arrow.Record, error) {
	if r.recordReader == nil {
		return nil, io.EOF
	}

	// No need to release the previous batch manually;
	// the stream reader releases it when the next one is read.

	// Read the next record from the stream
	rec, err := r.recordReader.Read()
	if err != nil {
		if err == io.EOF {
			return nil, io.EOF
		}
		return nil, fmt.Errorf("failed to read next record: %w", err)
	}

	return rec, nil
}

// Close closes the FFI reader
func (r *FFIPackedReader) Close() error {
	// No need to release the current batch manually;
	// the stream reader handles it.

	if r.recordReader != nil {
		r.recordReader = nil
	}

	if r.cPackedReader != 0 {
		status := C.CloseFFIReader(r.cPackedReader)
		r.cPackedReader = 0
		return ConsumeCStatusIntoError(&status)
	}

	return nil
}

// Schema returns the schema of the reader
func (r *FFIPackedReader) Schema() *arrow.Schema {
	return r.schema
}

// Retain increases the reference count
func (r *FFIPackedReader) Retain() {
	// if r.recordReader != nil {
	// 	r.recordReader.Retain()
	// }
}

// Release decreases the reference count
func (r *FFIPackedReader) Release() {
	r.Close()
}

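// GetManifest resolves the latest column-group manifest for the segment
// referenced by manifestPath: it splits the path into its base path and
// version via UnmarshalManfestPath, builds FFI properties from storageConfig,
// and calls the get_latest_column_groups FFI function, returning the manifest
// content as a string.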
func GetManifest(manifestPath string, storageConfig *indexpb.StorageConfig) (manifest string, err error) {
	basePath, version, err := UnmarshalManfestPath(manifestPath)
	if err != nil {
		return "", err
	}
	log.Info("GetManifest", zap.String("manifestPath", manifestPath), zap.String("basePath", basePath), zap.Int64("version", version))

	cProperties, err := MakePropertiesFromStorageConfig(storageConfig, nil)
	if err != nil {
		return "", err
	}
	cBasePath := C.CString(basePath)
	defer C.free(unsafe.Pointer(cBasePath))

	var cManifest *C.char
	var cVersion C.int64_t
	result := C.get_latest_column_groups(cBasePath, cProperties, &cManifest, &cVersion)
	err = HandleFFIResult(result)
	if err != nil {
		return "", err
	}

	manifest = C.GoString(cManifest)
	return manifest, nil
}

// Ensure FFIPackedReader implements array.RecordReader interface
// var _ array.RecordReader = (*FFIPackedReader)(nil)