feat: Add CMEK cipher plugin (#43722)
1. Enable Milvus to read cipher configs
2. Enable cipher plugin in binlog reader and writer
3. Add a testCipher for unittests
4. Support pooling for datanode
5. Add encryption in storagev2

See also: #40321

Signed-off-by: yangxuan <xuan.yang@zilliz.com>
This commit is contained in:
parent 55b24b7a78
commit 37a447d166
@@ -68,8 +68,10 @@ class MilvusConan(ConanFile):
         "arrow:with_boost": True,
         "arrow:with_thrift": True,
         "arrow:with_jemalloc": True,
+        "arrow:with_openssl": True,
         "arrow:shared": False,
         "arrow:with_s3": True,
+        "arrow:encryption": True,
         "aws-sdk-cpp:config": True,
         "aws-sdk-cpp:text-to-speech": False,
         "aws-sdk-cpp:transfer": False,
@@ -36,6 +36,8 @@ const milvus::FieldId TimestampFieldID = milvus::FieldId(1);
 const char ORIGIN_SIZE_KEY[] = "original_size";
 const char INDEX_BUILD_ID_KEY[] = "indexBuildID";
 const char NULLABLE[] = "nullable";
+const char EDEK[] = "edek";
+const char EZID[] = "encryption_zone";
 
 const char INDEX_ROOT_PATH[] = "index_files";
 const char RAWDATA_ROOT_PATH[] = "raw_datas";
@@ -143,6 +143,12 @@ typedef struct CNewSegmentResult {
     CStatus status;
     CSegmentInterface segmentPtr;
 } CNewSegmentResult;
 
+typedef struct CPluginContext {
+    int64_t ez_id;
+    int64_t collection_id;
+    const char* key;
+} CPluginContext;
+
 #ifdef __cplusplus
 }
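For orientation, a minimal sketch of how a caller might fill this struct before handing it across the C API boundary; the values below are invented examples, not taken from the commit, and passing a null pointer keeps the old unencrypted behaviour.

// Hypothetical illustration only: populate the plugin context consumed by the
// packed reader/writer C API introduced in this change (values are made up).
CPluginContext ctx;
ctx.ez_id = 1001;           // encryption-zone id
ctx.collection_id = 42;     // collection the binlog/index belongs to
ctx.key = "base64-edek";    // encrypted data-encryption key supplied by the caller
// e.g. NewPackedWriter(schema, paths, num_paths, part_upload_size, groups, &writer, &ctx);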
@@ -16,6 +16,7 @@
 #include "fmt/core.h"
 #include "indexbuilder/type_c.h"
 #include "log/Log.h"
+#include "storage/PluginLoader.h"
 
 #ifdef __linux__
 #include <malloc.h>
@@ -245,6 +246,19 @@ CreateIndex(CIndex* res_index,
         milvus::storage::FileManagerContext fileManagerContext(
             field_meta, index_meta, chunk_manager, fs);
 
+        if (build_index_info->has_storage_plugin_context()) {
+            auto cipherPlugin = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+            AssertInfo(cipherPlugin != nullptr, "failed to get cipher plugin");
+            cipherPlugin->Update(build_index_info->storage_plugin_context().encryption_zone_id(),
+                                 build_index_info->storage_plugin_context().collection_id(),
+                                 build_index_info->storage_plugin_context().encryption_key());
+
+            auto plugin_context = std::make_shared<CPluginContext>();
+            plugin_context->ez_id = build_index_info->storage_plugin_context().encryption_zone_id();
+            plugin_context->collection_id = build_index_info->storage_plugin_context().collection_id();
+            fileManagerContext.set_plugin_context(plugin_context);
+        }
+
         auto index =
             milvus::indexbuilder::IndexFactory::GetInstance().CreateIndex(
                 field_type, config, fileManagerContext);
@@ -323,6 +337,14 @@ BuildJsonKeyIndex(ProtoLayoutInterface result,
         milvus::storage::FileManagerContext fileManagerContext(
             field_meta, index_meta, chunk_manager, fs);
 
+        if (build_index_info->has_storage_plugin_context()) {
+            auto cipherPlugin = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+            AssertInfo(cipherPlugin != nullptr, "failed to get cipher plugin");
+            cipherPlugin->Update(build_index_info->storage_plugin_context().encryption_zone_id(),
+                                 build_index_info->storage_plugin_context().collection_id(),
+                                 build_index_info->storage_plugin_context().encryption_key());
+        }
+
         auto field_schema =
             FieldMeta::ParseFrom(build_index_info->field_schema());
         auto index = std::make_unique<index::JsonKeyStatsInvertedIndex>(
@@ -396,6 +418,14 @@ BuildTextIndex(ProtoLayoutInterface result,
         milvus::storage::FileManagerContext fileManagerContext(
             field_meta, index_meta, chunk_manager, fs);
 
+        if (build_index_info->has_storage_plugin_context()) {
+            auto cipherPlugin = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+            AssertInfo(cipherPlugin != nullptr, "failed to get cipher plugin");
+            cipherPlugin->Update(build_index_info->storage_plugin_context().encryption_zone_id(),
+                                 build_index_info->storage_plugin_context().collection_id(),
+                                 build_index_info->storage_plugin_context().encryption_key());
+        }
+
         auto scalar_index_engine_version =
             build_index_info->current_scalar_index_version();
         config[milvus::index::SCALAR_INDEX_ENGINE_VERSION] =
@@ -42,6 +42,7 @@
 #include "storage/RemoteChunkManagerSingleton.h"
 #include "storage/Util.h"
 #include "storage/ThreadPools.h"
+#include "storage/KeyRetriever.h"
 #include "common/TypeTraits.h"
 
 #include "milvus-storage/format/parquet/file_reader.h"
@@ -463,7 +464,9 @@ SegmentGrowingImpl::load_column_group_data_internal(
     row_group_lists.reserve(insert_files.size());
     for (const auto& file : insert_files) {
         auto reader =
-            std::make_shared<milvus_storage::FileRowGroupReader>(fs, file);
+            std::make_shared<milvus_storage::FileRowGroupReader>(fs, file,
+                milvus_storage::DEFAULT_READ_BUFFER_SIZE,
+                storage::GetReaderProperties());
         auto row_group_num =
             reader->file_metadata()->GetRowGroupMetadataVector().size();
         std::vector<int64_t> all_row_groups(row_group_num);
@@ -31,6 +31,8 @@
 #include "common/BitsetView.h"
 #include "common/QueryResult.h"
 #include "common/QueryInfo.h"
+#include "folly/SharedMutex.h"
+#include "common/type_c.h"
 #include "mmap/ChunkedColumnInterface.h"
 #include "index/Index.h"
 #include "index/JsonFlatIndex.h"
@@ -35,6 +35,7 @@
 #include "log/Log.h"
 #include "storage/ThreadPools.h"
 #include "common/Common.h"
+#include "storage/KeyRetriever.h"
 
 namespace milvus::segcore {
 
@@ -194,7 +195,8 @@ LoadWithStrategy(const std::vector<std::string>& remote_files,
                 "[StorageV2] file system is nullptr");
             auto row_group_reader =
                 std::make_shared<milvus_storage::FileRowGroupReader>(
-                    fs, file, schema, reader_memory_limit);
+                    fs, file, schema, reader_memory_limit,
+                    milvus::storage::GetReaderProperties());
             AssertInfo(row_group_reader != nullptr,
                        "[StorageV2] row group reader is nullptr");
             row_group_reader->SetRowGroupOffsetAndCount(block.offset,
@@ -17,6 +17,10 @@
 #include "milvus-storage/common/log.h"
 #include "milvus-storage/filesystem/fs.h"
 #include "milvus-storage/common/config.h"
+#include "parquet/encryption/encryption.h"
+#include "storage/PluginLoader.h"
+#include "storage/KeyRetriever.h"
+#include "log/Log.h"
 
 #include <arrow/c/bridge.h>
 #include <arrow/filesystem/filesystem.h>
@@ -32,7 +36,8 @@ NewPackedReaderWithStorageConfig(char** paths,
                                  struct ArrowSchema* schema,
                                  const int64_t buffer_size,
                                  CStorageConfig c_storage_config,
-                                 CPackedReader* c_packed_reader) {
+                                 CPackedReader* c_packed_reader,
+                                 CPluginContext* c_plugin_context) {
     SCOPE_CGO_CALL_METRIC();
 
     try {
@@ -66,8 +71,13 @@ NewPackedReaderWithStorageConfig(char** paths,
                       "[StorageV2] Failed to get filesystem");
         }
         auto trueSchema = arrow::ImportSchema(schema).ValueOrDie();
+        auto plugin_ptr = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+        if (plugin_ptr != nullptr && c_plugin_context != nullptr) {
+            plugin_ptr->Update(c_plugin_context->ez_id, c_plugin_context->collection_id, std::string(c_plugin_context->key));
+        }
+
         auto reader = std::make_unique<milvus_storage::PackedRecordBatchReader>(
-            trueFs, truePaths, trueSchema, buffer_size);
+            trueFs, truePaths, trueSchema, buffer_size, milvus::storage::GetReaderProperties());
         *c_packed_reader = reader.release();
         return milvus::SuccessCStatus();
     } catch (std::exception& e) {
@@ -80,7 +90,8 @@ NewPackedReader(char** paths,
                 int64_t num_paths,
                 struct ArrowSchema* schema,
                 const int64_t buffer_size,
-                CPackedReader* c_packed_reader) {
+                CPackedReader* c_packed_reader,
+                CPluginContext* c_plugin_context) {
     SCOPE_CGO_CALL_METRIC();
 
     try {
@@ -93,8 +104,14 @@ NewPackedReader(char** paths,
                       "[StorageV2] Failed to get filesystem");
         }
         auto trueSchema = arrow::ImportSchema(schema).ValueOrDie();
+
+        auto plugin_ptr = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+        if (plugin_ptr != nullptr && c_plugin_context != nullptr) {
+            plugin_ptr->Update(c_plugin_context->ez_id, c_plugin_context->collection_id, std::string(c_plugin_context->key));
+        }
+
         auto reader = std::make_unique<milvus_storage::PackedRecordBatchReader>(
-            trueFs, truePaths, trueSchema, buffer_size);
+            trueFs, truePaths, trueSchema, buffer_size, milvus::storage::GetReaderProperties());
         *c_packed_reader = reader.release();
         return milvus::SuccessCStatus();
     } catch (std::exception& e) {
@@ -31,7 +31,8 @@ NewPackedReaderWithStorageConfig(char** paths,
                                  struct ArrowSchema* schema,
                                  const int64_t buffer_size,
                                  CStorageConfig c_storage_config,
-                                 CPackedReader* c_packed_reader);
+                                 CPackedReader* c_packed_reader,
+                                 CPluginContext* c_plugin_context);
 
 /**
  * @brief Open a packed reader to read needed columns in the specified path.
@@ -46,7 +47,8 @@ NewPackedReader(char** paths,
                 int64_t num_paths,
                 struct ArrowSchema* schema,
                 const int64_t buffer_size,
-                CPackedReader* c_packed_reader);
+                CPackedReader* c_packed_reader,
+                CPluginContext* c_plugin_context);
 
 /**
  * @brief Read the next record batch from the packed reader.
@@ -12,11 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "parquet/encryption/encryption.h"
+#include "parquet/properties.h"
+#include "parquet/types.h"
 #include "segcore/column_groups_c.h"
 #include "segcore/packed_writer_c.h"
 #include "milvus-storage/packed/writer.h"
 #include "milvus-storage/common/config.h"
 #include "milvus-storage/filesystem/fs.h"
+#include "storage/PluginLoader.h"
+#include "storage/KeyRetriever.h"
 
 #include <arrow/c/bridge.h>
 #include <arrow/filesystem/filesystem.h>
@@ -37,7 +42,8 @@ NewPackedWriterWithStorageConfig(struct ArrowSchema* schema,
                                  int64_t part_upload_size,
                                  CColumnGroups column_groups,
                                  CStorageConfig c_storage_config,
-                                 CPackedWriter* c_packed_writer) {
+                                 CPackedWriter* c_packed_writer,
+                                 CPluginContext* c_plugin_context) {
     SCOPE_CGO_CALL_METRIC();
 
     try {
@@ -79,15 +85,32 @@ NewPackedWriterWithStorageConfig(struct ArrowSchema* schema,
         auto columnGroups =
             *static_cast<std::vector<std::vector<int>>*>(column_groups);
 
+        parquet::WriterProperties::Builder builder;
+        auto plugin_ptr = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+        if (plugin_ptr != nullptr && c_plugin_context != nullptr) {
+            plugin_ptr->Update(c_plugin_context->ez_id, c_plugin_context->collection_id, std::string(c_plugin_context->key));
+            auto got = plugin_ptr->GetEncryptor(c_plugin_context->ez_id, c_plugin_context->collection_id);
+            parquet::FileEncryptionProperties::Builder file_encryption_builder(got.first->GetKey());
+            auto metadata = milvus::storage::EncodeKeyMetadata(
+                c_plugin_context->ez_id,
+                c_plugin_context->collection_id,
+                got.second);
+
+            builder.encryption(file_encryption_builder.footer_key_metadata(metadata)
+                                   ->algorithm(parquet::ParquetCipher::AES_GCM_V1)
+                                   ->build());
+        }
+
+        auto writer_properties = builder.build();
         auto writer = std::make_unique<milvus_storage::PackedRecordBatchWriter>(
             trueFs,
             truePaths,
             trueSchema,
             storage_config,
             columnGroups,
-            buffer_size);
+            buffer_size,
+            writer_properties);
         AssertInfo(writer, "[StorageV2] writer pointer is null");
 
         *c_packed_writer = writer.release();
         return milvus::SuccessCStatus();
     } catch (std::exception& e) {
@@ -102,7 +125,8 @@ NewPackedWriter(struct ArrowSchema* schema,
                 int64_t num_paths,
                 int64_t part_upload_size,
                 CColumnGroups column_groups,
-                CPackedWriter* c_packed_writer) {
+                CPackedWriter* c_packed_writer,
+                CPluginContext* c_plugin_context) {
     SCOPE_CGO_CALL_METRIC();
 
     try {
@@ -124,10 +148,26 @@ NewPackedWriter(struct ArrowSchema* schema,
         auto columnGroups =
             *static_cast<std::vector<std::vector<int>>*>(column_groups);
 
-        auto writer = std::make_unique<milvus_storage::PackedRecordBatchWriter>(
-            trueFs, truePaths, trueSchema, conf, columnGroups, buffer_size);
-        AssertInfo(writer, "[StorageV2] writer pointer is null");
+        parquet::WriterProperties::Builder builder;
+        auto plugin_ptr = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+        if (plugin_ptr != nullptr && c_plugin_context != nullptr) {
+            plugin_ptr->Update(c_plugin_context->ez_id, c_plugin_context->collection_id, std::string(c_plugin_context->key));
+
+            auto got = plugin_ptr->GetEncryptor(c_plugin_context->ez_id, c_plugin_context->collection_id);
+            parquet::FileEncryptionProperties::Builder file_encryption_builder(got.first->GetKey());
+            auto metadata = milvus::storage::EncodeKeyMetadata(
+                c_plugin_context->ez_id,
+                c_plugin_context->collection_id,
+                got.second);
+            builder.encryption(file_encryption_builder.footer_key_metadata(metadata)
+                                   ->algorithm(parquet::ParquetCipher::AES_GCM_V1)
+                                   ->build());
+        }
+
+        auto writer_properties = builder.build();
+        auto writer = std::make_unique<milvus_storage::PackedRecordBatchWriter>(
+            trueFs, truePaths, trueSchema, conf, columnGroups, buffer_size, writer_properties);
+        AssertInfo(writer, "[StorageV2] writer pointer is null");
         *c_packed_writer = writer.release();
         return milvus::SuccessCStatus();
     } catch (std::exception& e) {
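The writer hunks above all follow the same Parquet modular-encryption pattern. Below is a minimal, standalone sketch of that pattern using the Arrow parquet-cpp builders; the key and metadata strings are placeholders, not the exact Milvus code path.

#include <parquet/properties.h>
#include <parquet/encryption/encryption.h>

// Sketch: build WriterProperties whose footer key comes from the cipher plugin
// and whose key metadata is the encoded "<ez_id>_<collection_id>_<edek>" string.
std::shared_ptr<parquet::WriterProperties> MakeEncryptedWriterProperties(
    const std::string& footer_key,        // raw AES key returned by the plugin encryptor
    const std::string& key_metadata) {    // placeholder, e.g. "7_1001_a1b2c3"
    parquet::FileEncryptionProperties::Builder enc_builder(footer_key);
    parquet::WriterProperties::Builder builder;
    builder.encryption(enc_builder.footer_key_metadata(key_metadata)
                           ->algorithm(parquet::ParquetCipher::AES_GCM_V1)
                           ->build());
    return builder.build();
}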
@@ -32,7 +32,8 @@ NewPackedWriterWithStorageConfig(struct ArrowSchema* schema,
                                  int64_t part_upload_size,
                                  CColumnGroups column_groups,
                                  CStorageConfig c_storage_config,
-                                 CPackedWriter* c_packed_writer);
+                                 CPackedWriter* c_packed_writer,
+                                 CPluginContext* c_plugin_context);
 
 CStatus
 NewPackedWriter(struct ArrowSchema* schema,
@@ -41,7 +42,8 @@ NewPackedWriter(struct ArrowSchema* schema,
                 int64_t num_paths,
                 int64_t part_upload_size,
                 CColumnGroups column_groups,
-                CPackedWriter* c_packed_writer);
+                CPackedWriter* c_packed_writer,
+                CPluginContext* c_plugin_context);
 
 CStatus
 WriteRecordBatch(CPackedWriter c_packed_writer,
@@ -56,7 +56,10 @@ NewSegment(CCollection collection,
     switch (seg_type) {
         case Growing: {
             auto seg = milvus::segcore::CreateGrowingSegment(
-                col->get_schema(), col->get_index_meta(), segment_id);
+                col->get_schema(),
+                col->get_index_meta(),
+                segment_id,
+                milvus::segcore::SegcoreConfig::default_config());
             segment = std::move(seg);
             break;
         }
@@ -14,6 +14,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 #include "segcore/storagev2translator/GroupChunkTranslator.h"
+#include "common/type_c.h"
 #include "segcore/storagev2translator/GroupCTMeta.h"
 #include "common/GroupChunk.h"
 #include "mmap/Types.h"
|
|||||||
#include "milvus-storage/common/constants.h"
|
#include "milvus-storage/common/constants.h"
|
||||||
#include "milvus-storage/format/parquet/file_reader.h"
|
#include "milvus-storage/format/parquet/file_reader.h"
|
||||||
#include "storage/ThreadPools.h"
|
#include "storage/ThreadPools.h"
|
||||||
|
#include "storage/KeyRetriever.h"
|
||||||
#include "segcore/memory_planner.h"
|
#include "segcore/memory_planner.h"
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <unordered_set>
|
#include <unordered_set>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
@ -77,7 +80,9 @@ GroupChunkTranslator::GroupChunkTranslator(
|
|||||||
// Get row group metadata from files
|
// Get row group metadata from files
|
||||||
for (const auto& file : insert_files_) {
|
for (const auto& file : insert_files_) {
|
||||||
auto reader =
|
auto reader =
|
||||||
std::make_shared<milvus_storage::FileRowGroupReader>(fs, file);
|
std::make_shared<milvus_storage::FileRowGroupReader>(fs, file,
|
||||||
|
milvus_storage::DEFAULT_READ_BUFFER_SIZE,
|
||||||
|
storage::GetReaderProperties());
|
||||||
row_group_meta_list_.push_back(
|
row_group_meta_list_.push_back(
|
||||||
reader->file_metadata()->GetRowGroupMetadataVector());
|
reader->file_metadata()->GetRowGroupMetadataVector());
|
||||||
auto status = reader->Close();
|
auto status = reader->Close();
|
||||||
|
|||||||
@@ -30,6 +30,8 @@ class BinlogReader {
         : data_(binlog_data), size_(length), tell_(0) {
     }
 
+    ~BinlogReader(){};
+
     template <typename T>
     SegcoreError
     ReadSingleValue(T& val) {
@@ -43,4 +43,6 @@ if(USE_OPENDAL)
     set(SOURCE_FILES ${SOURCE_FILES} opendal/OpenDALChunkManager.cpp)
 endif()
 
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/plugin)
+
 add_library(milvus_storage OBJECT ${SOURCE_FILES})
@@ -17,10 +17,12 @@
 #include "storage/DataCodec.h"
 #include <memory>
 #include "storage/Event.h"
+#include "log/Log.h"
 #include "storage/Util.h"
 #include "storage/InsertData.h"
 #include "storage/IndexData.h"
 #include "storage/BinlogReader.h"
+#include "storage/PluginLoader.h"
 #include "common/EasyAssert.h"
 #include "common/Consts.h"
 
@@ -30,7 +32,8 @@ std::unique_ptr<DataCodec>
 DeserializeFileData(const std::shared_ptr<uint8_t[]> input_data,
                     int64_t length,
                     bool is_field_data) {
-    auto reader = std::make_shared<BinlogReader>(input_data, length);
+    auto buff_to_keep = input_data;  // ref += 1
+    auto reader = std::make_shared<BinlogReader>(buff_to_keep, length);  // ref += 1
     ReadMediumType(reader);
 
     DescriptorEvent descriptor_event(reader);
@@ -45,11 +48,44 @@ DeserializeFileData(const std::shared_ptr<uint8_t[]> input_data,
                                         descriptor_fix_part.partition_id,
                                         descriptor_fix_part.segment_id,
                                         descriptor_fix_part.field_id};
 
+    auto edek = descriptor_event.GetEdekFromExtra();
+    if (edek.length() > 0) {
+        auto cipherPlugin = PluginLoader::GetInstance().getCipherPlugin();
+        AssertInfo(cipherPlugin != nullptr, "cipher plugin missing for an encrypted file");
+
+        int64_t ez_id = descriptor_event.GetEZFromExtra();
+        AssertInfo(ez_id != -1, "ez_id meta not exist for a encrypted file");
+        auto decryptor = cipherPlugin->GetDecryptor(ez_id, descriptor_fix_part.collection_id, edek);
+
+        auto left_size = length - descriptor_event.event_header.next_position_;
+        LOG_INFO("start decrypting data, ez_id: {}, collection_id: {}, total length: {}, descriptor_length: {}, cipher text length: {}",
+                 ez_id, descriptor_fix_part.collection_id, length, descriptor_event.event_header.next_position_, left_size);
+
+        AssertInfo(left_size > 0, "cipher text length is 0");
+        std::string cipher_str;
+        cipher_str.resize(left_size);  // allocate enough space for size bytes
+
+        auto err = reader->Read(left_size, reinterpret_cast<void*>(cipher_str.data()));
+        AssertInfo(err.ok(), "Read binlog failed, err = {}", err.what());
+
+        auto decrypted_str = decryptor->Decrypt(cipher_str);
+        LOG_INFO("cipher plugin decrypted data: cipher text length: {}, plain text length: {}", left_size, decrypted_str.size());
+
+        auto decrypted_ptr = std::shared_ptr<uint8_t[]>(
+            new uint8_t[decrypted_str.size()],
+            [](uint8_t* ptr) { delete[] ptr; });
+        memcpy(decrypted_ptr.get(), decrypted_str.data(), decrypted_str.size());
+        buff_to_keep = decrypted_ptr;
+
+        reader = std::make_shared<BinlogReader>(buff_to_keep, decrypted_str.size());
+    }
+
     EventHeader header(reader);
+    auto event_data_length = header.event_length_ - GetEventHeaderSize(header);
     switch (header.event_type_) {
         case EventType::InsertEvent: {
-            auto event_data_length =
-                header.event_length_ - GetEventHeaderSize(header);
             auto insert_event_data = InsertEventData(
                 reader, event_data_length, data_type, nullable, is_field_data);
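The on-disk layout this decryption branch relies on, as an illustrative annotation (not verbatim from the commit): the magic number and the descriptor event stay in plaintext so that the "edek" and "encryption_zone" extras can be read first, and everything after the descriptor is ciphertext.

// [ MAGIC_NUM | DescriptorEvent (plaintext, extras carry "edek" + "encryption_zone") | ciphertext of the remaining events ]
//
// The ciphertext therefore starts at the descriptor's next_position_ and spans
// the rest of the buffer:
int64_t cipher_offset = descriptor_event.event_header.next_position_;
int64_t cipher_size = length - cipher_offset;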
@@ -61,12 +97,10 @@ DeserializeFileData(const std::shared_ptr<uint8_t[]> input_data,
                 insert_event_data.end_timestamp);
             // DataCodec must keep the input_data alive for zero-copy usage,
             // otherwise segmentation violation will occur
-            insert_data->SetData(input_data);
+            insert_data->SetData(buff_to_keep);
             return insert_data;
         }
         case EventType::IndexFileEvent: {
-            auto event_data_length =
-                header.event_length_ - GetEventHeaderSize(header);
             auto index_event_data =
                 IndexEventData(reader, event_data_length, data_type, nullable);
@@ -105,7 +139,7 @@ DeserializeFileData(const std::shared_ptr<uint8_t[]> input_data,
                 index_event_data.end_timestamp);
             // DataCodec must keep the input_data alive for zero-copy usage,
             // otherwise segmentation violation will occur
-            index_data->SetData(input_data);
+            index_data->SetData(buff_to_keep);
             return index_data;
         }
         default:
@@ -57,6 +57,7 @@ DiskFileManagerImpl::DiskFileManagerImpl(
                   fileManagerContext.indexMeta) {
     rcm_ = fileManagerContext.chunkManagerPtr;
     fs_ = fileManagerContext.fs;
+    plugin_context_ = fileManagerContext.plugin_context;
 }
 
 DiskFileManagerImpl::~DiskFileManagerImpl() {
@@ -265,7 +266,8 @@ DiskFileManagerImpl::AddBatchIndexFiles(
                            remote_file_sizes,
                            remote_files,
                            field_meta_,
-                           index_meta_);
+                           index_meta_,
+                           plugin_context_);
     for (auto& re : res) {
         remote_paths_to_size_[re.first] = re.second;
     }
@@ -16,6 +16,7 @@
 
 #include <glog/logging.h>
 #include <any>
+#include <cstdint>
 #include <string>
 #include "common/Array.h"
 #include "common/Consts.h"
@@ -173,6 +174,14 @@ DescriptorEventData::DescriptorEventData(BinlogReaderPtr reader) {
     if (json.contains(NULLABLE)) {
         extras[NULLABLE] = static_cast<bool>(json[NULLABLE]);
     }
+
+    if (json.contains(EDEK)) {
+        extras[EDEK] = static_cast<std::string>(json[EDEK]);
+    }
+
+    if (json.contains(EZID)) {
+        extras[EZID] = static_cast<int64_t>(json[EZID]);
+    }
 }
 
 std::vector<uint8_t>
@@ -182,6 +191,8 @@ DescriptorEventData::Serialize() {
     for (auto v : extras) {
         if (v.first == NULLABLE) {
             extras_json.emplace(v.first, std::any_cast<bool>(v.second));
+        } else if (v.first == EZID) {
+            extras_json.emplace(v.first, std::any_cast<int64_t>(v.second));
         } else {
             extras_json.emplace(v.first, std::any_cast<std::string>(v.second));
         }
@@ -391,26 +402,46 @@ DescriptorEvent::DescriptorEvent(BinlogReaderPtr reader) {
     event_data = DescriptorEventData(reader);
 }
 
+std::string
+DescriptorEvent::GetEdekFromExtra() {
+    auto it = event_data.extras.find(EDEK);
+    if (it != event_data.extras.end()) {
+        return std::any_cast<std::string>(it->second);
+    }
+    return "";
+}
+
+int64_t
+DescriptorEvent::GetEZFromExtra() {
+    auto it = event_data.extras.find(EZID);
+    if (it != event_data.extras.end()) {
+        return std::any_cast<int64_t>(it->second);
+    }
+    return -1;
+}
+
 std::vector<uint8_t>
 DescriptorEvent::Serialize() {
+    auto data_bytes = event_data.Serialize();
+
     event_header.event_type_ = EventType::DescriptorEvent;
-    auto data = event_data.Serialize();
-    int data_size = data.size();
-
-    event_header.event_length_ = GetEventHeaderSize(event_header) + data_size;
-    auto header = event_header.Serialize();
-    int header_size = header.size();
-
-    int len = header_size + data_size + sizeof(MAGIC_NUM);
-    std::vector<uint8_t> res(len, 0);
-    int offset = 0;
+    event_header.event_length_ = GetEventHeaderSize(event_header) + data_bytes.size();
+    event_header.next_position_ = event_header.event_length_ + sizeof(MAGIC_NUM);
+    auto header_bytes = event_header.Serialize();
+
+    LOG_INFO("DescriptorEvent next position:{}, magic size:{}, header_size:{}, data_size:{}",
+             event_header.next_position_,
+             sizeof(MAGIC_NUM), header_bytes.size(), data_bytes.size());
+
+    std::vector<uint8_t> res(event_header.next_position_, 0);
+    int32_t offset = 0;
     memcpy(res.data(), &MAGIC_NUM, sizeof(MAGIC_NUM));
     offset += sizeof(MAGIC_NUM);
-    memcpy(res.data() + offset, header.data(), header_size);
-    offset += header_size;
-    memcpy(res.data() + offset, data.data(), data_size);
-    offset += data_size;
-    event_header.next_position_ = offset;
+    memcpy(res.data() + offset, header_bytes.data(), header_bytes.size());
+    offset += header_bytes.size();
+    memcpy(res.data() + offset, data_bytes.data(), data_bytes.size());
+    offset += data_bytes.size();
 
     return res;
 }
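The key change in the rewritten Serialize is that next_position_ is fixed before the header is serialized, so the reader can trust it as the byte offset where the (possibly encrypted) payload begins. A worked example with invented sizes:

// header bytes = 25, descriptor data bytes = 180, sizeof(MAGIC_NUM) = 4
//   event_length_  = 25 + 180 = 205
//   next_position_ = 205 + 4  = 209   // total size of [MAGIC | header | data]
// DeserializeFileData then treats bytes [209, length) as ciphertext whenever
// the descriptor extras carry an "edek" entry.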
@@ -97,6 +97,12 @@ struct DescriptorEvent {
 
     std::vector<uint8_t>
     Serialize();
+
+    std::string
+    GetEdekFromExtra();
+
+    int64_t
+    GetEZFromExtra();
 };
 
 struct BaseEvent {
@@ -56,11 +56,17 @@ struct FileManagerContext {
         for_loading_index = value;
     }
 
+    void
+    set_plugin_context(std::shared_ptr<CPluginContext> context) {
+        plugin_context = context;
+    }
+
     FieldDataMeta fieldDataMeta;
     IndexMeta indexMeta;
     ChunkManagerPtr chunkManagerPtr;
     milvus_storage::ArrowFileSystemPtr fs;
     bool for_loading_index{false};
+    std::shared_ptr<CPluginContext> plugin_context;
 };
 
 #define FILEMANAGER_TRY try {
@@ -201,6 +207,7 @@ class FileManagerImpl : public milvus::FileManager {
     IndexMeta index_meta_;
     ChunkManagerPtr rcm_;
     milvus_storage::ArrowFileSystemPtr fs_;
+    std::shared_ptr<CPluginContext> plugin_context_;
 };
 
 using FileManagerImplPtr = std::shared_ptr<FileManagerImpl>;
@@ -17,7 +17,9 @@
 #include "storage/IndexData.h"
 #include "common/EasyAssert.h"
 #include "common/Consts.h"
+#include "log/Log.h"
 #include "storage/Event.h"
+#include "storage/PluginLoader.h"
 
 namespace milvus::storage {
 
@@ -47,7 +49,7 @@ IndexData::Serialize(StorageType medium) {
 }
 
 std::vector<uint8_t>
-IndexData::serialize_to_remote_file() {
+IndexData::serialize_to_remote_file(std::shared_ptr<CPluginContext> context) {
     AssertInfo(field_data_meta_.has_value(), "field data meta not exist");
     AssertInfo(index_meta_.has_value(), "index meta not exist");
     // create descriptor event
@@ -78,6 +80,16 @@ IndexData::serialize_to_remote_file() {
     auto& des_event_header = descriptor_event.event_header;
     // TODO :: set timestamp
     des_event_header.timestamp_ = 0;
 
+    std::shared_ptr<milvus::storage::plugin::IEncryptor> encryptor;
+    if (context) {
+        auto cipherPlugin = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+        auto pair = cipherPlugin->GetEncryptor(context->ez_id, context->collection_id);
+        encryptor = pair.first;
+        des_event_data.extras[EDEK] = pair.second;
+        des_event_data.extras[EZID] = context->ez_id;
+    }
+
     // serialize descriptor event data
     auto des_event_bytes = descriptor_event.Serialize();
 
@@ -96,9 +108,19 @@ IndexData::serialize_to_remote_file() {
 
     // serialize insert event
     auto index_event_bytes = index_event.Serialize();
+    if (encryptor) {
+        std::string plain_text(index_event_bytes.begin(), index_event_bytes.end());
+        auto cipher_text = encryptor->Encrypt(plain_text);
+        des_event_bytes.insert(des_event_bytes.end(),
+                               cipher_text.begin(),
+                               cipher_text.end());
+        LOG_INFO("Cipher plugin encrypts index, ez {}, plain text length {}, cipher text length {}",
+                 context->ez_id, plain_text.size(), cipher_text.size());
+    } else {
         des_event_bytes.insert(des_event_bytes.end(),
                                index_event_bytes.begin(),
                                index_event_bytes.end());
+    }
     return des_event_bytes;
 }
 
@@ -46,7 +46,7 @@ class IndexData : public DataCodec {
     set_index_meta(const IndexMeta& meta);
 
     std::vector<uint8_t>
-    serialize_to_remote_file();
+    serialize_to_remote_file(std::shared_ptr<CPluginContext> context = nullptr);
 
     std::vector<uint8_t>
     serialize_to_local_file();
internal/core/src/storage/KeyRetriever.cpp (new file, 60 lines)
@@ -0,0 +1,60 @@
#include "storage/KeyRetriever.h"
#include "common/EasyAssert.h"
#include "log/Log.h"
#include "parquet/properties.h"
#include "storage/PluginLoader.h"

namespace milvus::storage {

std::string
KeyRetriever::GetKey(const std::string& key_metadata) {
    auto plugin = PluginLoader::GetInstance().getCipherPlugin();
    AssertInfo(plugin != nullptr, "cipher plugin not found");
    auto context = DecodeKeyMetadata(key_metadata);
    AssertInfo(context != nullptr, "invalid key metadata: {}", key_metadata);
    auto decryptor = plugin->GetDecryptor(context->ez_id, context->collection_id, std::string(context->key));
    return decryptor->GetKey();
}

parquet::ReaderProperties
GetReaderProperties() {
    parquet::ReaderProperties reader_properties = parquet::default_reader_properties();
    std::shared_ptr<milvus::storage::KeyRetriever> key_retriever = std::make_shared<milvus::storage::KeyRetriever>();
    parquet::FileDecryptionProperties::Builder builder;
    reader_properties.file_decryption_properties(builder.key_retriever(key_retriever)
                                                     ->plaintext_files_allowed()
                                                     ->build());
    return reader_properties;
}

std::string
EncodeKeyMetadata(int64_t ez_id, int64_t collection_id, std::string key) {
    return std::to_string(ez_id) + "_" + std::to_string(collection_id) + "_" + key;
}

std::shared_ptr<CPluginContext>
DecodeKeyMetadata(std::string key_metadata) {
    auto context = std::make_shared<CPluginContext>();
    try {
        auto first_pos = key_metadata.find("_");
        if (first_pos == std::string::npos) {
            return nullptr;
        }

        auto second_pos = key_metadata.find("_", first_pos + 1);
        if (second_pos == std::string::npos) {
            return nullptr;
        }

        context->ez_id = std::stoll(key_metadata.substr(0, first_pos));
        context->collection_id =
            std::stoll(key_metadata.substr(first_pos + 1, second_pos - (first_pos + 1)));
        context->key = key_metadata.substr(second_pos + 1).c_str();
    } catch (const std::exception& e) {
        LOG_WARN("failed to decode key metadata, reason: {}", e.what());
        return nullptr;
    }
    return context;
}

}  // namespace milvus::storage
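A quick round-trip sketch of the footer-key metadata format that this retriever parses; the values are invented for illustration.

// "<ez_id>_<collection_id>_<edek>" round trip (illustrative values):
auto meta = milvus::storage::EncodeKeyMetadata(7, 1001, "a1b2c3");   // "7_1001_a1b2c3"
auto ctx = milvus::storage::DecodeKeyMetadata(meta);
// ctx->ez_id == 7 and ctx->collection_id == 1001; ctx->key points at whatever
// follows the second underscore, so the edek itself may contain '_' characters.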
internal/core/src/storage/KeyRetriever.h (new file, 20 lines)
@@ -0,0 +1,20 @@
#include "common/type_c.h"
#include "parquet/encryption/encryption.h"

namespace milvus::storage {

class KeyRetriever : public parquet::DecryptionKeyRetriever {
 public:
    std::string GetKey(const std::string& key_metadata) override;
};

parquet::ReaderProperties
GetReaderProperties();

std::string
EncodeKeyMetadata(int64_t ez_id, int64_t collection_id, std::string key);

std::shared_ptr<CPluginContext>
DecodeKeyMetadata(std::string key_metadata);

}  // namespace milvus::storage
@@ -34,6 +34,7 @@ MemFileManagerImpl::MemFileManagerImpl(
                   fileManagerContext.indexMeta) {
     rcm_ = fileManagerContext.chunkManagerPtr;
     fs_ = fileManagerContext.fs;
+    plugin_context_ = fileManagerContext.plugin_context;
 }
 
 bool
@@ -54,7 +55,8 @@ MemFileManagerImpl::AddBinarySet(const BinarySet& binary_set,
                            slice_sizes,
                            slice_names,
                            field_meta_,
-                           index_meta_);
+                           index_meta_,
+                           plugin_context_);
     for (auto& [file, size] : res) {
         remote_paths_to_size_[file] = size;
     }
internal/core/src/storage/PluginLoader.h (new file, 142 lines)
@@ -0,0 +1,142 @@
#pragma once

#include <dlfcn.h>
#include <map>
#include <memory>
#include <mutex>
#include "log/Log.h"
#include "storage/plugin/PluginInterface.h"
#include "common/EasyAssert.h"
#include "common/Exception.h"

namespace milvus::storage {

class PluginLoader {
 public:
    // Delete copy constructor and assignment operator to enforce singleton behavior
    PluginLoader(const PluginLoader&) = delete;
    PluginLoader&
    operator=(const PluginLoader&) = delete;

    static PluginLoader&
    GetInstance() {
        static PluginLoader instance;
        return instance;
    }

    ~PluginLoader() {
        unloadAll();
    }

    void
    load(const std::string& path) {
        std::lock_guard<std::mutex> lock(mutex_);
        void* handle = dlopen(path.c_str(), RTLD_LAZY);
        // void *handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_DEEPBIND);
        if (!handle) {
            const char* error = dlerror();
            ThrowInfo(
                UnexpectedError,
                fmt::format("Failed to load plugin: {}, err={}", path, error));
        }

        // Reset error flags
        dlerror();

        using IPluginPtr = milvus::storage::plugin::IPlugin* (*)();
        auto createPluginFunc =
            reinterpret_cast<IPluginPtr>(dlsym(handle, "CreatePlugin"));

        const char* error = dlerror();
        if (error) {
            dlclose(handle);
            ThrowInfo(UnexpectedError,
                      fmt::format("Failed to load plugin: {}", error));
        }

        error = dlerror();
        auto pluginPtr = createPluginFunc();
        if (!pluginPtr) {
            dlclose(handle);
            ThrowInfo(
                UnexpectedError,
                fmt::format("Failed to init plugin: {}, {}", path, error));
        }

        std::string pluginName = pluginPtr->getPluginName();
        if (plugins_.find(pluginName) != plugins_.end()) {
            LOG_DEBUG("Plugin with name {} is already loaded.", pluginName);
            dlclose(handle);
            return;
        }

        // Store the plugin and its handle
        plugins_[pluginName] =
            std::shared_ptr<milvus::storage::plugin::IPlugin>(pluginPtr);
        handles_[pluginName] = handle;
        LOG_INFO("Loaded plugin: {}", pluginName);
    }

    void
    unloadAll() {
        std::lock_guard<std::mutex> lock(mutex_);
        plugins_.clear();
        for (auto& handle : handles_) {
            dlclose(handle.second);
        }
        handles_.clear();
    }

    std::shared_ptr<milvus::storage::plugin::ICipherPlugin>
    getCipherPlugin() {
        auto p = getPlugin("CipherPlugin");
        if (!p) {
            return nullptr;
        }
        return std::dynamic_pointer_cast<
            milvus::storage::plugin::ICipherPlugin>(p);
    }

    std::shared_ptr<milvus::storage::plugin::IPlugin>
    getPlugin(const std::string& name) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = plugins_.find(name);
        return it != plugins_.end() ? it->second : nullptr;
    }

    std::vector<std::string>
    listPlugins() const {
        std::lock_guard<std::mutex> lock(mutex_);
        std::vector<std::string> names;
        for (const auto& pair : plugins_) {
            names.push_back(pair.first);
        }
        return names;
    }

    void
    unload(const std::string& name) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = plugins_.find(name);
        if (it != plugins_.end()) {
            plugins_.erase(it);
        }

        auto handleIt = handles_.find(name);
        if (handleIt != handles_.end()) {
            dlclose(handleIt->second);
            handles_.erase(handleIt);
        }
    }

 private:
    PluginLoader() {
    }

    mutable std::mutex mutex_;
    std::map<std::string, void*> handles_;
    std::map<std::string, std::shared_ptr<milvus::storage::plugin::IPlugin>>
        plugins_;
};

}  // namespace milvus::storage
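How the pieces fit together, as a hedged usage sketch: the shared-object path below is a placeholder, and the Update/GetEncryptor calls are the ones the rest of this diff makes against ICipherPlugin rather than a documented public API.

auto& loader = milvus::storage::PluginLoader::GetInstance();
loader.load("/path/to/libmilvus_cipher_plugin.so");   // the .so must export extern "C" CreatePlugin()

auto cipher = loader.getCipherPlugin();
if (cipher != nullptr) {
    cipher->Update(/*ez_id=*/7, /*collection_id=*/1001, /*key=*/"base64-edek");
    auto got = cipher->GetEncryptor(7, 1001);
    // got.first is the encryptor (Encrypt()/GetKey()); got.second is the edek
    // that the writers above embed in the Parquet footer key metadata.
}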
@@ -20,6 +20,7 @@
 #include "arrow/array/builder_nested.h"
 #include "arrow/scalar.h"
 #include "arrow/type_fwd.h"
+#include "common/type_c.h"
 #include "fmt/format.h"
 #include "index/Utils.h"
 #include "log/Log.h"
@@ -28,6 +29,7 @@
 #include "common/EasyAssert.h"
 #include "common/FieldData.h"
 #include "common/FieldDataInterface.h"
+#include "pb/common.pb.h"
 #ifdef AZURE_BUILD_DIR
 #include "storage/azure/AzureChunkManager.h"
 #endif
@@ -49,6 +51,7 @@
 #include "storage/ThreadPools.h"
 #include "storage/MemFileManagerImpl.h"
 #include "storage/DiskFileManagerImpl.h"
+#include "storage/KeyRetriever.h"
 #include "segcore/memory_planner.h"
 #include "mmap/Types.h"
 #include "milvus-storage/format/parquet/file_reader.h"
@@ -720,7 +723,8 @@ EncodeAndUploadIndexSlice(ChunkManager* chunk_manager,
                           int64_t batch_size,
                           IndexMeta index_meta,
                           FieldDataMeta field_meta,
-                          std::string object_key) {
+                          std::string object_key,
+                          std::shared_ptr<CPluginContext> plugin_context) {
     std::shared_ptr<IndexData> index_data = nullptr;
     if (index_meta.index_non_encoding) {
         index_data = std::make_shared<IndexData>(buf, batch_size);
@@ -735,7 +739,7 @@ EncodeAndUploadIndexSlice(ChunkManager* chunk_manager,
     // index not use valid_data, so no need to set nullable==true
     index_data->set_index_meta(index_meta);
     index_data->SetFieldDataMeta(field_meta);
-    auto serialized_index_data = index_data->serialize_to_remote_file();
+    auto serialized_index_data = index_data->serialize_to_remote_file(plugin_context);
     auto serialized_index_size = serialized_index_data.size();
     chunk_manager->Write(
         object_key, serialized_index_data.data(), serialized_index_size);
@@ -751,9 +755,7 @@ GetObjectData(ChunkManager* remote_chunk_manager,
     std::vector<std::future<std::unique_ptr<DataCodec>>> futures;
     futures.reserve(remote_files.size());
 
-    auto DownloadAndDeserialize = [&](ChunkManager* chunk_manager,
-                                      const std::string& file,
-                                      bool is_field_data) {
+    auto DownloadAndDeserialize = [](ChunkManager* chunk_manager, bool is_field_data, const std::string file) {
         // TODO remove this Size() cost
         auto fileSize = chunk_manager->Size(file);
         auto buf = std::shared_ptr<uint8_t[]>(new uint8_t[fileSize]);
@@ -763,8 +765,7 @@ GetObjectData(ChunkManager* remote_chunk_manager,
     };
 
     for (auto& file : remote_files) {
-        futures.emplace_back(pool.Submit(
-            DownloadAndDeserialize, remote_chunk_manager, file, is_field_data));
+        futures.emplace_back(pool.Submit(DownloadAndDeserialize, remote_chunk_manager, is_field_data, file));
     }
     return futures;
 }
@@ -775,7 +776,8 @@ PutIndexData(ChunkManager* remote_chunk_manager,
              const std::vector<int64_t>& slice_sizes,
              const std::vector<std::string>& slice_names,
              FieldDataMeta& field_meta,
-             IndexMeta& index_meta) {
+             IndexMeta& index_meta,
+             std::shared_ptr<CPluginContext> plugin_context) {
     auto& pool = ThreadPools::GetThreadPool(milvus::ThreadPoolPriority::MIDDLE);
     std::vector<std::future<std::pair<std::string, size_t>>> futures;
     AssertInfo(data_slices.size() == slice_sizes.size(),
@@ -794,7 +796,8 @@ PutIndexData(ChunkManager* remote_chunk_manager,
                                          slice_sizes[i],
                                          index_meta,
                                          field_meta,
-                                         slice_names[i]));
+                                         slice_names[i],
+                                         plugin_context));
     }
 
     std::map<std::string, int64_t> remote_paths_to_size;
@@ -1154,7 +1157,10 @@ GetFieldDatasFromStorageV2(std::vector<std::vector<std::string>>& remote_files,
         // get all row groups for each file
         std::vector<std::vector<int64_t>> row_group_lists;
         auto reader = std::make_shared<milvus_storage::FileRowGroupReader>(
-            fs, column_group_file);
+            fs,
+            column_group_file,
+            milvus_storage::DEFAULT_READ_BUFFER_SIZE,
+            GetReaderProperties());
 
         auto row_group_num =
             reader->file_metadata()->GetRowGroupMetadataVector().size();
@ -1180,7 +1186,8 @@ GetFieldDatasFromStorageV2(std::vector<std::vector<std::string>>& remote_files,
|
|||||||
DEFAULT_FIELD_MAX_MEMORY_LIMIT,
|
DEFAULT_FIELD_MAX_MEMORY_LIMIT,
|
||||||
std::move(strategy),
|
std::move(strategy),
|
||||||
row_group_lists,
|
row_group_lists,
|
||||||
nullptr);
|
nullptr,
|
||||||
|
milvus::proto::common::LoadPriority::HIGH);
|
||||||
});
|
});
|
||||||
// read field data from channel
|
// read field data from channel
|
||||||
std::shared_ptr<milvus::ArrowDataWrapper> r;
|
std::shared_ptr<milvus::ArrowDataWrapper> r;
|
||||||
@ -1264,7 +1271,9 @@ GetFieldIDList(FieldId column_group_id,
|
|||||||
return field_id_list;
|
return field_id_list;
|
||||||
}
|
}
|
||||||
auto file_reader = std::make_shared<milvus_storage::FileRowGroupReader>(
|
auto file_reader = std::make_shared<milvus_storage::FileRowGroupReader>(
|
||||||
fs, filepath, arrow_schema);
|
fs, filepath, arrow_schema,
|
||||||
|
milvus_storage::DEFAULT_READ_BUFFER_SIZE,
|
||||||
|
GetReaderProperties());
|
||||||
field_id_list =
|
field_id_list =
|
||||||
file_reader->file_metadata()->GetGroupFieldIDList().GetFieldIDList(
|
file_reader->file_metadata()->GetGroupFieldIDList().GetFieldIDList(
|
||||||
column_group_id.get());
|
column_group_id.get());
|
||||||
|
|||||||
@@ -154,7 +154,8 @@ EncodeAndUploadIndexSlice(ChunkManager* chunk_manager,
                           int64_t batch_size,
                           IndexMeta index_meta,
                           FieldDataMeta field_meta,
-                          std::string object_key);
+                          std::string object_key,
+                          std::shared_ptr<CPluginContext> plugin_context);

 std::vector<std::future<std::unique_ptr<DataCodec>>>
 GetObjectData(
@@ -176,7 +177,8 @@ PutIndexData(ChunkManager* remote_chunk_manager,
              const std::vector<int64_t>& slice_sizes,
              const std::vector<std::string>& slice_names,
              FieldDataMeta& field_meta,
-             IndexMeta& index_meta);
+             IndexMeta& index_meta,
+             std::shared_ptr<CPluginContext> plugin_context);

 int64_t
 GetTotalNumRowsForFieldDatas(const std::vector<FieldDataPtr>& field_datas);
63  internal/core/src/storage/plugin/PluginInterface.h  Normal file
@@ -0,0 +1,63 @@
+#pragma once
+#include <cstdint>
+#include <vector>
+#include <string>
+#include <unordered_map>
+#include <memory>
+#include <string>
+
+
+namespace milvus::storage{
+namespace plugin{
+
+class IEncryptor;
+class IDecryptor;
+class ICipherPlugin;
+
+class IPlugin{
+ public:
+    virtual ~IPlugin() = default;
+
+    virtual std::string
+    getPluginName() const = 0;
+};
+
+class ICipherPlugin: public IPlugin{
+ public:
+    virtual ~ICipherPlugin() = default;
+
+    std::string
+    getPluginName() const override{ return "ICipherPlugin"; }
+
+    virtual void Update(int64_t ez_id, int64_t coll_id, const std::string& key) = 0;
+
+    virtual std::pair<std::shared_ptr<IEncryptor>, std::string>
+    GetEncryptor(int64_t ez_id, int64_t coll_id) = 0;
+
+    virtual std::shared_ptr<IDecryptor>
+    GetDecryptor(int64_t ez_id, int64_t coll_id, const std::string& safeKey) = 0;
+};
+
+class IEncryptor {
+ public:
+    virtual ~IEncryptor() = default;
+    virtual std::string
+    Encrypt(const std::string& plaintext) = 0;
+
+    virtual std::string
+    GetKey() = 0;
+};
+
+class IDecryptor {
+ public:
+    virtual ~IDecryptor() = default;
+    virtual std::string
+    Decrypt(const std::string& ciphertext) = 0;
+
+    virtual std::string
+    GetKey() = 0;
+};
+
+
+} // namespace plugin
+} // namespace milvus::storage
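Note: the header above only declares the contract; the concrete cipher plugin is loaded at runtime as a shared library through PluginLoader. As a rough, purely illustrative sketch (the class names and the toy XOR scheme below are assumptions, not part of this commit; a production CMEK plugin would wrap a KMS-backed AEAD cipher), an implementation of ICipherPlugin might look like this:

// Illustrative only: a toy cipher plugin against the interface above.
// XOR is NOT real encryption; it just shows the expected call flow.
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "storage/plugin/PluginInterface.h"

namespace milvus::storage::plugin {

// One object serves as both encryptor and decryptor for a given key.
class XorCodec : public IEncryptor, public IDecryptor {
 public:
    explicit XorCodec(std::string key) : key_(std::move(key)) {}

    std::string Encrypt(const std::string& plaintext) override { return Apply(plaintext); }
    std::string Decrypt(const std::string& ciphertext) override { return Apply(ciphertext); }
    std::string GetKey() override { return key_; }

 private:
    std::string Apply(const std::string& in) const {
        if (key_.empty()) {
            return in;  // no key registered -> pass data through unchanged
        }
        std::string out = in;
        for (size_t i = 0; i < out.size(); ++i) {
            out[i] ^= key_[i % key_.size()];
        }
        return out;
    }
    std::string key_;
};

class ToyCipherPlugin : public ICipherPlugin {
 public:
    void Update(int64_t ez_id, int64_t coll_id, const std::string& key) override {
        // An empty key acts as an "un-ref" in this sketch.
        keys_[{ez_id, coll_id}] = key;
    }

    std::pair<std::shared_ptr<IEncryptor>, std::string>
    GetEncryptor(int64_t ez_id, int64_t coll_id) override {
        auto key = keys_.at({ez_id, coll_id});
        // The second element presumably carries the wrapped data-encryption key
        // that the writer persists with the binlog metadata; here it is just the key.
        return {std::make_shared<XorCodec>(key), key};
    }

    std::shared_ptr<IDecryptor>
    GetDecryptor(int64_t ez_id, int64_t coll_id, const std::string& safeKey) override {
        // Readers rebuild the decryptor from the key material stored alongside the data.
        return std::make_shared<XorCodec>(safeKey);
    }

 private:
    std::map<std::pair<int64_t, int64_t>, std::string> keys_;
};

}  // namespace milvus::storage::plugin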
@@ -17,11 +17,13 @@
 #include "storage/storage_c.h"
 #include "storage/FileWriter.h"
 #include "monitor/Monitor.h"
+#include "storage/PluginLoader.h"
 #include "storage/RemoteChunkManagerSingleton.h"
 #include "storage/LocalChunkManagerSingleton.h"
 #include "storage/MmapManager.h"
 #include "storage/ThreadPools.h"
 #include "monitor/scope_metric.h"
+#include "common/EasyAssert.h"

 CStatus
 GetLocalUsedSize(const char* c_dir, int64_t* size) {
@@ -161,3 +163,38 @@ ResizeTheadPool(int64_t priority, float ratio) {
     milvus::ThreadPools::ResizeThreadPool(
         static_cast<milvus::ThreadPoolPriority>(priority), ratio);
 }
+
+void
+CleanPluginLoader() {
+    milvus::storage::PluginLoader::GetInstance().unloadAll();
+}
+
+CStatus
+InitPluginLoader(const char* plugin_path) {
+    try {
+        milvus::storage::PluginLoader::GetInstance().load(plugin_path);
+        return milvus::SuccessCStatus();
+    } catch (std::exception& e) {
+        return milvus::FailureCStatus(&e);
+    }
+}
+
+CStatus
+PutOrRefPluginContext(CPluginContext c_plugin_context) {
+    auto cipherPluginPtr = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+    if (!cipherPluginPtr) {
+        return milvus::FailureCStatus(milvus::UnexpectedError, "cipher plugin not loaded");
+    }
+    cipherPluginPtr->Update(c_plugin_context.ez_id, c_plugin_context.collection_id, std::string(c_plugin_context.key));
+    return milvus::SuccessCStatus();
+}
+
+CStatus
+UnRefPluginContext(CPluginContext c_plugin_context) {
+    auto cipherPluginPtr = milvus::storage::PluginLoader::GetInstance().getCipherPlugin();
+    if (!cipherPluginPtr) {
+        return milvus::FailureCStatus(milvus::UnexpectedError, "cipher plugin not loaded");
+    }
+    cipherPluginPtr->Update(c_plugin_context.ez_id, c_plugin_context.collection_id, "");
+    return milvus::SuccessCStatus();
+}
@@ -42,6 +42,19 @@ ResizeTheadPool(int64_t priority, float ratio);
 CStatus
 InitDiskFileWriterConfig(CDiskWriteConfig c_disk_write_config);

+// Plugin related APIs
+CStatus
+InitPluginLoader(const char* plugin_path);
+
+void
+CleanPluginLoader();
+
+CStatus
+PutOrRefPluginContext(CPluginContext c_plugin_context);
+
+CStatus
+UnRefPluginContext(CPluginContext c_plugin_context);
+
 #ifdef __cplusplus
 };
 #endif
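For orientation, a minimal sketch of how a caller on the segcore side might exercise this new C surface (the plugin path, key material, and the header used for CStatus/Success are assumptions for illustration, not part of this commit):

// Sketch only: drives the plugin-loader C API declared above.
#include "storage/storage_c.h"
#include "common/type_c.h"  // assumed location of CStatus / Success

void
UseCipherPluginContext() {
    // Load the cipher plugin shared library once per process.
    auto status = InitPluginLoader("/path/to/libtest_cipher_plugin.so");
    if (status.error_code != Success) {
        return;  // plugin not available; encryption stays disabled
    }

    // Register the (encryption zone, collection) -> key mapping before any binlog IO.
    CPluginContext ctx;
    ctx.ez_id = 1;
    ctx.collection_id = 100;
    ctx.key = "illustrative-encryption-key";
    PutOrRefPluginContext(ctx);

    // ... read/write encrypted binlogs, build indexes, etc. ...

    UnRefPluginContext(ctx);  // drops the cached key for this zone/collection
    CleanPluginLoader();      // unload plugins at shutdown
}

The Put/UnRef naming suggests reference-counted key registration; the sketch only shows the call order, not the ownership rules.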
@@ -950,7 +950,7 @@ TEST(CApiTest, SearchTestWhenNullable) {
 TEST(CApiTest, InsertSamePkAfterDeleteOnGrowingSegment) {
     auto collection = NewCollection(get_default_schema_config().c_str());
     CSegmentInterface segment;
-    auto status = NewSegment(collection, Growing, 111, &segment, false);
+    auto status = NewSegment(collection, Growing, 112, &segment, false);
     ASSERT_EQ(status.error_code, Success);
     auto col = (milvus::segcore::Collection*)collection;

@@ -73,7 +73,8 @@ TEST(CPackedTest, PackedWriterAndReader) {
         1,
         part_upload_size,
         cgs,
-        &c_packed_writer);
+        &c_packed_writer,
+        nullptr);
     EXPECT_EQ(c_status.error_code, 0);
     EXPECT_NE(c_packed_writer, nullptr);

@@ -95,7 +96,7 @@ TEST(CPackedTest, PackedWriterAndReader) {
     ASSERT_TRUE(arrow::ExportSchema(*schema, &c_read_schema).ok());
     CPackedReader c_packed_reader = nullptr;
     c_status = NewPackedReader(
-        paths, 1, &c_read_schema, buffer_size, &c_packed_reader);
+        paths, 1, &c_read_schema, buffer_size, &c_packed_reader, nullptr);
     EXPECT_EQ(c_status.error_code, 0);
     EXPECT_NE(c_packed_reader, nullptr);
@@ -387,6 +387,7 @@ func (t *clusteringCompactionTask) BuildCompactionRequest() (*datapb.CompactionP
 			StorageVersion: segInfo.GetStorageVersion(),
 		})
 	}
+	WrapPluginContext(taskProto.GetCollectionID(), taskProto.GetSchema().GetProperties(), plan)
 	log.Info("Compaction handler build clustering compaction plan", zap.Any("PreAllocatedLogIDs", logIDRange))
 	return plan, nil
 }

@@ -356,6 +356,8 @@ func (t *l0CompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, err
 		zap.Any("target position", taskProto.GetPos()),
 		zap.Any("target segments count", len(sealedSegBinlogs)),
 		zap.Any("PreAllocatedLogIDs", logIDRange))
+
+	WrapPluginContext(taskProto.GetCollectionID(), taskProto.GetSchema().GetProperties(), plan)
 	return plan, nil
 }

@@ -376,7 +376,6 @@ func (t *mixCompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, er
 		JsonParams:                compactionParams,
 		CurrentScalarIndexVersion: t.ievm.GetCurrentScalarIndexEngineVersion(),
 	}
-
 	segIDMap := make(map[int64][]*datapb.FieldBinlog, len(plan.SegmentBinlogs))
 	segments := make([]*SegmentInfo, 0, len(taskProto.GetInputSegments()))
 	for _, segID := range taskProto.GetInputSegments() {
@@ -408,6 +407,8 @@ func (t *mixCompactionTask) BuildCompactionRequest() (*datapb.CompactionPlan, er
 	// BeginLogID is deprecated, but still assign it for compatibility.
 	plan.BeginLogID = logIDRange.Begin

+	WrapPluginContext(taskProto.GetCollectionID(), taskProto.GetSchema().GetProperties(), plan)
+
 	log.Info("Compaction handler refreshed mix compaction plan", zap.Int64("maxSize", plan.GetMaxSize()),
 		zap.Any("PreAllocatedLogIDs", logIDRange), zap.Any("segID2DeltaLogs", segIDMap))
 	return plan, nil

@@ -56,6 +56,8 @@ func (t CompactionTriggerType) String() string {
 		return "Clustering"
 	case TriggerTypeSingle:
 		return "Single"
+	case TriggerTypeSort:
+		return "Sort"
 	default:
 		return ""
 	}
@@ -17,9 +17,13 @@
 package datacoord

 import (
+	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus/internal/datacoord/allocator"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
 	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
+	"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
 	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
+	"google.golang.org/protobuf/proto"
 )

 // PreAllocateBinlogIDs pre-allocates binlog IDs based on the total number of binlogs from
@@ -44,3 +48,33 @@ func PreAllocateBinlogIDs(allocator allocator.Allocator, segmentInfos []*Segment
 	begin, end, err := allocator.AllocN(int64(n))
 	return &datapb.IDRange{Begin: begin, End: end}, err
 }
+
+func WrapPluginContext(collectionID int64, properties []*commonpb.KeyValuePair, msg proto.Message) {
+	pluginContext := hookutil.GetStoragePluginContext(properties, collectionID)
+	if pluginContext == nil {
+		return
+	}
+
+	switch msg.(type) {
+	case *datapb.CompactionPlan:
+		plan := msg.(*datapb.CompactionPlan)
+		plan.PluginContext = append(plan.PluginContext, pluginContext...)
+	case *workerpb.CreateJobRequest:
+		job := msg.(*workerpb.CreateJobRequest)
+		job.PluginContext = append(job.PluginContext, pluginContext...)
+	case *workerpb.AnalyzeRequest:
+		job := msg.(*workerpb.AnalyzeRequest)
+		job.PluginContext = append(job.PluginContext, pluginContext...)
+	case *workerpb.CreateStatsRequest:
+		job := msg.(*workerpb.CreateStatsRequest)
+		job.PluginContext = append(job.PluginContext, pluginContext...)
+	case *datapb.ImportRequest:
+		job := msg.(*datapb.ImportRequest)
+		job.PluginContext = append(job.PluginContext, pluginContext...)
+	case *datapb.PreImportRequest:
+		job := msg.(*datapb.PreImportRequest)
+		job.PluginContext = append(job.PluginContext, pluginContext...)
+	default:
+		return
+	}
+}
@@ -288,7 +288,7 @@ func AssemblePreImportRequest(task ImportTask, job ImportJob) *datapb.PreImportR
 		return fileStats.GetImportFile()
 	})

-	return &datapb.PreImportRequest{
+	req := &datapb.PreImportRequest{
 		JobID:         task.GetJobID(),
 		TaskID:        task.GetTaskID(),
 		CollectionID:  task.GetCollectionID(),
@@ -300,6 +300,8 @@ func AssemblePreImportRequest(task ImportTask, job ImportJob) *datapb.PreImportR
 		TaskSlot:      task.GetTaskSlot(),
 		StorageConfig: createStorageConfig(),
 	}
+	WrapPluginContext(task.GetCollectionID(), job.GetSchema().GetProperties(), req)
+	return req
 }

 func AssembleImportRequest(task ImportTask, job ImportJob, meta *meta, alloc allocator.Allocator) (*datapb.ImportRequest, error) {
@@ -357,7 +359,7 @@ func AssembleImportRequest(task ImportTask, job ImportJob, meta *meta, alloc all
 	if Params.CommonCfg.EnableStorageV2.GetAsBool() {
 		storageVersion = storage.StorageV2
 	}
-	return &datapb.ImportRequest{
+	req := &datapb.ImportRequest{
 		JobID:        task.GetJobID(),
 		TaskID:       task.GetTaskID(),
 		CollectionID: task.GetCollectionID(),
@@ -372,7 +374,9 @@ func AssembleImportRequest(task ImportTask, job ImportJob, meta *meta, alloc all
 		StorageConfig:  createStorageConfig(),
 		TaskSlot:       task.GetTaskSlot(),
 		StorageVersion: storageVersion,
-	}, nil
+	}
+	WrapPluginContext(task.GetCollectionID(), job.GetSchema().GetProperties(), req)
+	return req, nil
 }

 func RegroupImportFiles(job ImportJob, files []*datapb.ImportFileStats, segmentMaxSize int) [][]*datapb.ImportFileStats {
@@ -22,6 +22,7 @@ import (

 	"go.uber.org/zap"

+	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 	"github.com/milvus-io/milvus/internal/datacoord/session"
 	globalTask "github.com/milvus-io/milvus/internal/datacoord/task"
 	"github.com/milvus-io/milvus/pkg/v2/log"
@@ -35,17 +36,24 @@ type analyzeTask struct {

 	times *taskcommon.Times

+	schema *schemapb.CollectionSchema
 	meta *meta
 }

 var _ globalTask.Task = (*analyzeTask)(nil)

 func newAnalyzeTask(t *indexpb.AnalyzeTask, meta *meta) *analyzeTask {
-	return &analyzeTask{
+	task := &analyzeTask{
 		AnalyzeTask: t,
 		times:       taskcommon.NewTimes(),
 		meta:        meta,
 	}
+	coll := meta.GetCollection(t.CollectionID)
+	if coll != nil {
+		task.schema = coll.Schema
+	}
+
+	return task
 }

 func (at *analyzeTask) SetTaskTime(timeType taskcommon.TimeType, time time.Time) {
@@ -142,6 +150,7 @@ func (at *analyzeTask) CreateTaskOnWorker(nodeID int64, cluster session.Cluster)
 		Version:       task.Version + 1,
 		StorageConfig: createStorageConfig(),
 	}
+	WrapPluginContext(task.CollectionID, at.schema.GetProperties(), req)

 	var err error
 	defer func() {
@@ -32,6 +32,7 @@ import (
 	"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
 	"github.com/milvus-io/milvus/pkg/v2/taskcommon"
 	"github.com/milvus-io/milvus/pkg/v2/util/merr"
+	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
 )

 type analyzeTaskSuite struct {
@@ -81,6 +82,7 @@ func (s *analyzeTaskSuite) SetupSuite() {

 	s.mt = &meta{
 		analyzeMeta: analyzeMt,
+		collections: typeutil.NewConcurrentMap[int64, *collectionInfo](),
 	}
 }

@@ -329,6 +329,8 @@ func (it *indexBuildTask) prepareJobRequest(ctx context.Context, segment *Segmen
 		InsertLogs: segment.GetBinlogs(),
 	}

+	WrapPluginContext(segment.GetCollectionID(), schema.GetProperties(), req)
+
 	return req, nil
 }

@@ -338,6 +338,7 @@ func (st *statsTask) prepareJobRequest(ctx context.Context, segment *SegmentInfo
 		StorageVersion:            segment.StorageVersion,
 		CurrentScalarIndexVersion: st.ievm.GetCurrentScalarIndexEngineVersion(),
 	}
+	WrapPluginContext(segment.GetCollectionID(), collInfo.Schema.GetProperties(), req)

 	return req, nil
 }
@@ -611,6 +611,7 @@ func (t *clusteringCompactionTask) mappingSegment(
 		storage.WithDownloader(func(ctx context.Context, paths []string) ([][]byte, error) {
 			return t.binlogIO.Download(ctx, paths)
 		}),
+		storage.WithCollectionID(t.GetCollection()),
 		storage.WithVersion(segment.StorageVersion),
 		storage.WithBufferSize(t.bufferSize),
 		storage.WithStorageConfig(t.compactionParams.StorageConfig),
@@ -911,6 +912,7 @@ func (t *clusteringCompactionTask) scalarAnalyzeSegment(
 		storage.WithBufferSize(t.bufferSize),
 		storage.WithStorageConfig(t.compactionParams.StorageConfig),
 		storage.WithNeededFields(requiredFields),
+		storage.WithCollectionID(t.GetCollection()),
 	)
 	if err != nil {
 		log.Warn("new binlog record reader wrong", zap.Error(err))
@@ -98,6 +98,7 @@ func (s *ClusteringCompactionTaskStorageV2Suite) TestScalarCompactionNormal_V2To

 	s.task.plan.SegmentBinlogs = []*datapb.CompactionSegmentBinlogs{
 		{
+			CollectionID: 1,
 			SegmentID:    segmentID,
 			FieldBinlogs: storage.SortFieldBinlogs(fBinlogs),
 			Deltalogs:    []*datapb.FieldBinlog{deltalogs},
@@ -160,6 +161,7 @@ func (s *ClusteringCompactionTaskStorageV2Suite) TestScalarCompactionNormal_V2To

 	s.task.plan.SegmentBinlogs = []*datapb.CompactionSegmentBinlogs{
 		{
+			CollectionID: 1,
 			SegmentID:    segmentID,
 			FieldBinlogs: storage.SortFieldBinlogs(fBinlogs),
 			Deltalogs:    []*datapb.FieldBinlog{deltalogs},
@@ -92,6 +92,7 @@ func (s *ClusteringCompactionTaskSuite) setupTest() {
 	s.plan = &datapb.CompactionPlan{
 		PlanID: 999,
 		SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{{
+			CollectionID:        CollectionID,
 			SegmentID:           100,
 			FieldBinlogs:        nil,
 			Field2StatslogPaths: nil,
@@ -172,6 +173,7 @@ func (s *ClusteringCompactionTaskSuite) TestCompactionInit() {
 	s.task.plan.ClusteringKeyField = 100
 	s.task.plan.SegmentBinlogs = []*datapb.CompactionSegmentBinlogs{
 		{
+			CollectionID: CollectionID,
 			SegmentID:    100,
 		},
 	}
@@ -223,6 +225,7 @@ func (s *ClusteringCompactionTaskSuite) preparScalarCompactionNormalTask() {

 	s.plan.SegmentBinlogs = []*datapb.CompactionSegmentBinlogs{
 		{
+			CollectionID: CollectionID,
 			SegmentID:    segmentID,
 			FieldBinlogs: lo.Values(fBinlogs),
 			Deltalogs: []*datapb.FieldBinlog{
@@ -323,6 +326,7 @@ func (s *ClusteringCompactionTaskSuite) prepareScalarCompactionNormalByMemoryLim

 	s.plan.SegmentBinlogs = []*datapb.CompactionSegmentBinlogs{
 		{
+			CollectionID: CollectionID,
 			SegmentID:    segmentID,
 			FieldBinlogs: lo.Values(fBinlogs),
 		},
@@ -412,6 +416,7 @@ func (s *ClusteringCompactionTaskSuite) prepareCompactionWithBM25FunctionTask()

 	s.plan.SegmentBinlogs = []*datapb.CompactionSegmentBinlogs{
 		{
+			CollectionID: CollectionID,
 			SegmentID:    segmentID,
 			FieldBinlogs: lo.Values(fBinlogs),
 		},
@@ -60,6 +60,7 @@ func mergeSortMultipleSegments(ctx context.Context,
 		reader, err := storage.NewBinlogRecordReader(ctx,
 			s.GetFieldBinlogs(),
 			plan.GetSchema(),
+			storage.WithCollectionID(collectionID),
 			storage.WithDownloader(binlogIO.Download),
 			storage.WithVersion(s.StorageVersion),
 			storage.WithStorageConfig(compactionParams.StorageConfig),

@@ -217,6 +217,7 @@ func (t *mixCompactionTask) writeSegment(ctx context.Context,
 		reader, err := storage.NewBinlogRecordReader(ctx,
 			seg.GetFieldBinlogs(),
 			t.plan.GetSchema(),
+			storage.WithCollectionID(t.collectionID),
 			storage.WithDownloader(t.binlogIO.Download),
 			storage.WithVersion(seg.GetStorageVersion()),
 			storage.WithStorageConfig(t.compactionParams.StorageConfig),
@@ -51,7 +51,7 @@ func TestMixCompactionTaskStorageV2Suite(t *testing.T) {
 }

 type MixCompactionTaskStorageV2Suite struct {
-	MixCompactionTaskSuite
+	MixCompactionTaskStorageV1Suite
 }

 func (s *MixCompactionTaskStorageV2Suite) SetupTest() {
@@ -109,6 +109,7 @@ func (s *MixCompactionTaskStorageV2Suite) TestCompactDupPK_MixToV2Format() {
 	})).Return(lo.Values(kvs), nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 		Deltalogs: []*datapb.FieldBinlog{
@@ -122,6 +123,7 @@ func (s *MixCompactionTaskStorageV2Suite) TestCompactDupPK_MixToV2Format() {
 	binlogs, _, _, _, _, err := s.initStorageV2Segments(1, segID, alloc)
 	s.NoError(err)
 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: storage.SortFieldBinlogs(binlogs),
 		Deltalogs:    []*datapb.FieldBinlog{},
@@ -156,6 +158,7 @@ func (s *MixCompactionTaskStorageV2Suite) TestCompactDupPK_V2ToV2Format() {
 	binlogs, _, _, _, _, err := s.initStorageV2Segments(1, segID, alloc)
 	s.NoError(err)
 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: storage.SortFieldBinlogs(binlogs),
 		Deltalogs:    []*datapb.FieldBinlog{},
@@ -191,6 +194,7 @@ func (s *MixCompactionTaskStorageV2Suite) TestCompactDupPK_V2ToV1Format() {
 	binlogs, _, _, _, _, err := s.initStorageV2Segments(1, segID, alloc)
 	s.NoError(err)
 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: storage.SortFieldBinlogs(binlogs),
 		Deltalogs:    []*datapb.FieldBinlog{},
@@ -47,10 +47,10 @@ import (
 )

 func TestMixCompactionTaskSuite(t *testing.T) {
-	suite.Run(t, new(MixCompactionTaskSuite))
+	suite.Run(t, new(MixCompactionTaskStorageV1Suite))
 }

-type MixCompactionTaskSuite struct {
+type MixCompactionTaskStorageV1Suite struct {
 	suite.Suite

 	mockBinlogIO *mock_util.MockBinlogIO
@@ -61,11 +61,11 @@ type MixCompactionTaskSuite struct {
 	task *mixCompactionTask
 }

-func (s *MixCompactionTaskSuite) SetupSuite() {
+func (s *MixCompactionTaskStorageV1Suite) SetupSuite() {
 	paramtable.Get().Init(paramtable.NewBaseTable())
 }

-func (s *MixCompactionTaskSuite) setupTest() {
+func (s *MixCompactionTaskStorageV1Suite) setupTest() {
 	s.mockBinlogIO = mock_util.NewMockBinlogIO(s.T())

 	s.meta = genTestCollectionMeta()
@@ -79,6 +79,7 @@ func (s *MixCompactionTaskSuite) setupTest() {
 	plan := &datapb.CompactionPlan{
 		PlanID: 999,
 		SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{{
+			CollectionID:        1,
 			SegmentID:           100,
 			FieldBinlogs:        nil,
 			Field2StatslogPaths: nil,
@@ -96,11 +97,12 @@ func (s *MixCompactionTaskSuite) setupTest() {
 	s.task = NewMixCompactionTask(context.Background(), s.mockBinlogIO, plan, compaction.GenParams())
 }

-func (s *MixCompactionTaskSuite) SetupTest() {
+func (s *MixCompactionTaskStorageV1Suite) SetupTest() {
 	s.setupTest()
+	paramtable.Get().Save("common.storage.enableV2", "false")
 }

-func (s *MixCompactionTaskSuite) SetupBM25() {
+func (s *MixCompactionTaskStorageV1Suite) SetupBM25() {
 	s.mockBinlogIO = mock_util.NewMockBinlogIO(s.T())
 	s.meta = genTestCollectionMetaWithBM25()
 	params, err := compaction.GenerateJSONParams()
@@ -111,6 +113,7 @@ func (s *MixCompactionTaskSuite) SetupBM25() {
 	plan := &datapb.CompactionPlan{
 		PlanID: 999,
 		SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{{
+			CollectionID:        1,
 			SegmentID:           100,
 			FieldBinlogs:        nil,
 			Field2StatslogPaths: nil,
@@ -128,19 +131,21 @@ func (s *MixCompactionTaskSuite) SetupBM25() {
 	s.task = NewMixCompactionTask(context.Background(), s.mockBinlogIO, plan, compaction.GenParams())
 }

-func (s *MixCompactionTaskSuite) SetupSubTest() {
+func (s *MixCompactionTaskStorageV1Suite) SetupSubTest() {
 	s.SetupTest()
 }

-func (s *MixCompactionTaskSuite) TearDownTest() {
+func (s *MixCompactionTaskStorageV1Suite) TearDownTest() {
 	paramtable.Get().Reset(paramtable.Get().CommonCfg.EntityExpirationTTL.Key)
+	paramtable.Get().Reset("common.storageType")
+	paramtable.Get().Reset("common.storage.enableV2")
 }

 func getMilvusBirthday() time.Time {
 	return time.Date(2019, time.Month(5), 30, 0, 0, 0, 0, time.UTC)
 }

-func (s *MixCompactionTaskSuite) prepareCompactDupPKSegments() {
+func (s *MixCompactionTaskStorageV1Suite) prepareCompactDupPKSegments() {
 	segments := []int64{7, 8, 9}
 	dblobs, err := getInt64DeltaBlobs(
 		1,
@@ -166,6 +171,7 @@ func (s *MixCompactionTaskSuite) prepareCompactDupPKSegments() {
 	})).Return(lo.Values(kvs), nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 		Deltalogs: []*datapb.FieldBinlog{
@@ -175,7 +181,7 @@ func (s *MixCompactionTaskSuite) prepareCompactDupPKSegments() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestCompactDupPK() {
+func (s *MixCompactionTaskStorageV1Suite) TestCompactDupPK() {
 	s.prepareCompactDupPKSegments()
 	result, err := s.task.Compact()
 	s.NoError(err)
@@ -192,7 +198,7 @@ func (s *MixCompactionTaskSuite) TestCompactDupPK() {
 	s.Empty(segment.Deltalogs)
 }

-func (s *MixCompactionTaskSuite) prepareCompactTwoToOneSegments() {
+func (s *MixCompactionTaskStorageV1Suite) prepareCompactTwoToOneSegments() {
 	segments := []int64{5, 6, 7}
 	alloc := allocator.NewLocalAllocator(7777777, math.MaxInt64)
 	s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(nil)
@@ -207,6 +213,7 @@ func (s *MixCompactionTaskSuite) prepareCompactTwoToOneSegments() {
 	})).Return(lo.Values(kvs), nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 	})
@@ -221,11 +228,12 @@ func (s *MixCompactionTaskSuite) prepareCompactTwoToOneSegments() {
 	}, pkoracle.NewBloomFilterSet(), nil)

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    seg.SegmentID(),
 	})
 }

-func (s *MixCompactionTaskSuite) TestCompactTwoToOne() {
+func (s *MixCompactionTaskStorageV1Suite) TestCompactTwoToOne() {
 	s.prepareCompactTwoToOneSegments()
 	result, err := s.task.Compact()
 	s.Require().NoError(err)
@@ -242,7 +250,7 @@ func (s *MixCompactionTaskSuite) TestCompactTwoToOne() {
 	s.Empty(segment.Deltalogs)
 }

-func (s *MixCompactionTaskSuite) prepareCompactTwoToOneWithBM25Segments() {
+func (s *MixCompactionTaskStorageV1Suite) prepareCompactTwoToOneWithBM25Segments() {
 	s.SetupBM25()
 	segments := []int64{5, 6, 7}
 	alloc := allocator.NewLocalAllocator(7777777, math.MaxInt64)
@@ -258,6 +266,7 @@ func (s *MixCompactionTaskSuite) prepareCompactTwoToOneWithBM25Segments() {
 	})).Return(lo.Values(kvs), nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 	})
@@ -272,11 +281,12 @@ func (s *MixCompactionTaskSuite) prepareCompactTwoToOneWithBM25Segments() {
 	}, pkoracle.NewBloomFilterSet(), nil)

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    seg.SegmentID(),
 	})
 }

-func (s *MixCompactionTaskSuite) TestCompactTwoToOneWithBM25() {
+func (s *MixCompactionTaskStorageV1Suite) TestCompactTwoToOneWithBM25() {
 	s.prepareCompactTwoToOneWithBM25Segments()
 	result, err := s.task.Compact()
 	s.NoError(err)
@@ -294,7 +304,7 @@ func (s *MixCompactionTaskSuite) TestCompactTwoToOneWithBM25() {
 	s.Empty(segment.Deltalogs)
 }

-func (s *MixCompactionTaskSuite) prepareCompactSortedSegment() {
+func (s *MixCompactionTaskStorageV1Suite) prepareCompactSortedSegment() {
 	segments := []int64{1001, 1002, 1003}
 	alloc := allocator.NewLocalAllocator(100, math.MaxInt64)
 	s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(nil)
@@ -320,6 +330,7 @@ func (s *MixCompactionTaskSuite) prepareCompactSortedSegment() {
 		Return([][]byte{blob.GetValue()}, nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 		IsSorted:     true,
@@ -330,7 +341,7 @@ func (s *MixCompactionTaskSuite) prepareCompactSortedSegment() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestCompactSortedSegment() {
+func (s *MixCompactionTaskStorageV1Suite) TestCompactSortedSegment() {
 	s.prepareCompactSortedSegment()
 	paramtable.Get().Save("dataNode.compaction.useMergeSort", "true")
 	defer paramtable.Get().Reset("dataNode.compaction.useMergeSort")
@@ -352,7 +363,7 @@ func (s *MixCompactionTaskSuite) TestCompactSortedSegment() {
 	s.Empty(segment.Deltalogs)
 }

-func (s *MixCompactionTaskSuite) prepareCompactSortedSegmentLackBinlog() {
+func (s *MixCompactionTaskStorageV1Suite) prepareCompactSortedSegmentLackBinlog() {
 	segments := []int64{1001, 1002, 1003}
 	alloc := allocator.NewLocalAllocator(100, math.MaxInt64)
 	s.mockBinlogIO.EXPECT().Upload(mock.Anything, mock.Anything).Return(nil)
@@ -399,6 +410,7 @@ func (s *MixCompactionTaskSuite) prepareCompactSortedSegmentLackBinlog() {
 		Return([][]byte{blob.GetValue()}, nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 		IsSorted:     true,
@@ -410,7 +422,7 @@ func (s *MixCompactionTaskSuite) prepareCompactSortedSegmentLackBinlog() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestCompactSortedSegmentLackBinlog() {
+func (s *MixCompactionTaskStorageV1Suite) TestCompactSortedSegmentLackBinlog() {
 	s.prepareCompactSortedSegmentLackBinlog()
 	paramtable.Get().Save("dataNode.compaction.useMergeSort", "true")
 	defer paramtable.Get().Reset("dataNode.compaction.useMergeSort")
@@ -432,7 +444,7 @@ func (s *MixCompactionTaskSuite) TestCompactSortedSegmentLackBinlog() {
 	s.Empty(segment.Deltalogs)
 }

-func (s *MixCompactionTaskSuite) prepareSplitMergeEntityExpired() {
+func (s *MixCompactionTaskStorageV1Suite) prepareSplitMergeEntityExpired() {
 	s.initSegBuffer(1, 3)
 	collTTL := 864000 // 10 days
 	s.task.currentTime = getMilvusBirthday().Add(time.Second * (time.Duration(collTTL) + 1))
@@ -465,7 +477,7 @@ func (s *MixCompactionTaskSuite) prepareSplitMergeEntityExpired() {
 	s.task.plan.SegmentBinlogs[0].FieldBinlogs = fieldBinlogs
 }

-func (s *MixCompactionTaskSuite) TestSplitMergeEntityExpired() {
+func (s *MixCompactionTaskStorageV1Suite) TestSplitMergeEntityExpired() {
 	s.prepareSplitMergeEntityExpired()

 	err := s.task.preCompact()
@@ -481,7 +493,7 @@ func (s *MixCompactionTaskSuite) TestSplitMergeEntityExpired() {
 	s.Empty(compactionSegments[0].GetField2StatslogPaths())
 }

-func (s *MixCompactionTaskSuite) TestMergeNoExpirationLackBinlog() {
+func (s *MixCompactionTaskStorageV1Suite) TestMergeNoExpirationLackBinlog() {
 	s.initSegBuffer(1, 4)
 	deleteTs := tsoutil.ComposeTSByTime(getMilvusBirthday().Add(10*time.Second), 0)
 	tests := []struct {
@@ -580,7 +592,7 @@ func (s *MixCompactionTaskSuite) TestMergeNoExpirationLackBinlog() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestMergeNoExpiration() {
+func (s *MixCompactionTaskStorageV1Suite) TestMergeNoExpiration() {
 	s.initSegBuffer(1, 4)
 	deleteTs := tsoutil.ComposeTSByTime(getMilvusBirthday().Add(10*time.Second), 0)
 	tests := []struct {
@@ -662,7 +674,7 @@ func (s *MixCompactionTaskSuite) TestMergeNoExpiration() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestGetBM25FieldIDs() {
+func (s *MixCompactionTaskStorageV1Suite) TestGetBM25FieldIDs() {
 	fieldIDs := GetBM25FieldIDs(&schemapb.CollectionSchema{
 		Functions: []*schemapb.FunctionSchema{{}},
 	})
@@ -672,7 +684,7 @@ func (s *MixCompactionTaskSuite) TestGetBM25FieldIDs() {
 	s.Equal(1, len(fieldIDs))
 }

-func (s *MixCompactionTaskSuite) TestMergeDeltalogsMultiSegment() {
+func (s *MixCompactionTaskStorageV1Suite) TestMergeDeltalogsMultiSegment() {
 	tests := []struct {
 		segIDA  int64
 		dataApk []int64
@@ -767,7 +779,7 @@ func (s *MixCompactionTaskSuite) TestMergeDeltalogsMultiSegment() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestMergeDeltalogsOneSegment() {
+func (s *MixCompactionTaskStorageV1Suite) TestMergeDeltalogsOneSegment() {
 	blob, err := getInt64DeltaBlobs(
 		100,
 		[]int64{1, 2, 3, 4, 5, 1, 2},
@@ -801,7 +813,7 @@ func (s *MixCompactionTaskSuite) TestMergeDeltalogsOneSegment() {
 	}
 }

-func (s *MixCompactionTaskSuite) TestCompactFail() {
+func (s *MixCompactionTaskStorageV1Suite) TestCompactFail() {
 	s.Run("mock ctx done", func() {
 		ctx, cancel := context.WithCancel(context.Background())
 		cancel()
@@ -866,7 +878,7 @@ func getRow(magic int64, ts int64) map[int64]interface{} {
 	}
 }

-func (s *MixCompactionTaskSuite) initMultiRowsSegBuffer(magic, numRows, step int64) {
+func (s *MixCompactionTaskStorageV1Suite) initMultiRowsSegBuffer(magic, numRows, step int64) {
 	segWriter, err := NewSegmentWriter(s.meta.GetSchema(), 65535, compactionBatchSize, magic, PartitionID, CollectionID, []int64{})
 	s.Require().NoError(err)

@@ -885,7 +897,7 @@ func (s *MixCompactionTaskSuite) initMultiRowsSegBuffer(magic, numRows, step int
 	s.segWriter = segWriter
 }

-func (s *MixCompactionTaskSuite) initSegBufferWithBM25(magic int64) {
+func (s *MixCompactionTaskStorageV1Suite) initSegBufferWithBM25(magic int64) {
 	segWriter, err := NewSegmentWriter(s.meta.GetSchema(), 100, compactionBatchSize, magic, PartitionID, CollectionID, []int64{102})
 	s.Require().NoError(err)

@@ -901,7 +913,7 @@ func (s *MixCompactionTaskSuite) initSegBufferWithBM25(magic int64) {
 	s.segWriter = segWriter
 }

-func (s *MixCompactionTaskSuite) initSegBuffer(size int, seed int64) {
+func (s *MixCompactionTaskStorageV1Suite) initSegBuffer(size int, seed int64) {
 	segWriter, err := NewSegmentWriter(s.meta.GetSchema(), 100, compactionBatchSize, seed, PartitionID, CollectionID, []int64{})
 	s.Require().NoError(err)

@@ -1214,7 +1226,7 @@ func genTestCollectionMeta() *etcdpb.CollectionMeta {

 func BenchmarkMixCompactor(b *testing.B) {
 	// Setup
-	s := new(MixCompactionTaskSuite)
+	s := new(MixCompactionTaskStorageV1Suite)

 	s.SetT(&testing.T{})
 	s.SetupSuite()
@@ -1239,6 +1251,7 @@ func BenchmarkMixCompactor(b *testing.B) {
 	})).Return(lo.Values(kvs), nil).Once()

 	s.task.plan.SegmentBinlogs = append(s.task.plan.SegmentBinlogs, &datapb.CompactionSegmentBinlogs{
+		CollectionID: 1,
 		SegmentID:    segID,
 		FieldBinlogs: lo.Values(fBinlogs),
 	})
@@ -208,6 +208,7 @@ func (t *sortCompactionTask) sortSegment(ctx context.Context) (*datapb.Compactio
 		storage.WithVersion(t.segmentStorageVersion),
 		storage.WithDownloader(t.binlogIO.Download),
 		storage.WithStorageConfig(t.compactionParams.StorageConfig),
+		storage.WithCollectionID(t.collectionID),
 	)
 	if err != nil {
 		log.Warn("error creating insert binlog reader", zap.Error(err))
@@ -204,8 +204,10 @@ func (node *DataNode) Init() error {
 		node.importTaskMgr = importv2.NewTaskManager()
 		node.importScheduler = importv2.NewScheduler(node.importTaskMgr)

-		index.InitSegcore()
+		err := index.InitSegcore()
+		if err != nil {
+			initError = err
+		}
 		log.Info("init datanode done", zap.String("Address", node.address))
 	})
 	return initError
@@ -38,7 +38,7 @@ import (
 	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
 )

-func InitSegcore() {
+func InitSegcore() error {
 	cGlogConf := C.CString(path.Join(paramtable.GetBaseTable().GetConfigDir(), paramtable.DefaultGlogConf))
 	C.IndexBuilderInit(cGlogConf)
 	C.free(unsafe.Pointer(cGlogConf))
@@ -84,8 +84,11 @@ func InitSegcore() {

 	// init paramtable change callback for core related config
 	initcore.SetupCoreConfigChangelCallback()
+
+	return initcore.InitPluginLoader()
 }

 func CloseSegcore() {
 	initcore.CleanGlogManager()
+	initcore.CleanPluginLoader()
 }
@@ -58,6 +58,8 @@ type indexBuildTask struct {
 	tr       *timerecord.TimeRecorder
 	queueDur time.Duration
 	manager  *TaskManager
+
+	pluginContext *indexcgopb.StoragePluginContext
 }

 func NewIndexBuildTask(ctx context.Context,
@@ -65,6 +67,7 @@ func NewIndexBuildTask(ctx context.Context,
 	req *workerpb.CreateJobRequest,
 	cm storage.ChunkManager,
 	manager *TaskManager,
+	pluginContext *indexcgopb.StoragePluginContext,
 ) *indexBuildTask {
 	t := &indexBuildTask{
 		ident: fmt.Sprintf("%s/%d", req.GetClusterID(), req.GetBuildID()),
@@ -74,6 +77,7 @@ func NewIndexBuildTask(ctx context.Context,
 		req:     req,
 		tr:      timerecord.NewTimeRecorder(fmt.Sprintf("IndexBuildID: %d, ClusterID: %s", req.GetBuildID(), req.GetClusterID())),
 		manager: manager,
+		pluginContext: pluginContext,
 	}

 	t.parseParams()
@@ -297,6 +301,10 @@ func (it *indexBuildTask) Execute(ctx context.Context) error {
 		StorageVersion: it.req.GetStorageVersion(),
 	}

+	if it.pluginContext != nil {
+		buildIndexParams.StoragePluginContext = it.pluginContext
+	}
+
 	if buildIndexParams.StorageVersion == storage.StorageV2 {
 		buildIndexParams.SegmentInsertFiles = util.GetSegmentInsertFiles(
 			it.req.GetInsertLogs(),
@@ -305,7 +313,6 @@ func (it *indexBuildTask) Execute(ctx context.Context) error {
 			it.req.GetPartitionID(),
 			it.req.GetSegmentID())
 	}
-
 	log.Info("create index", zap.Any("buildIndexParams", buildIndexParams))

 	it.index, err = indexcgowrapper.CreateIndex(ctx, buildIndexParams)
@ -233,6 +233,7 @@ func (st *statsTask) sort(ctx context.Context) ([]*datapb.FieldBinlog, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
rr, err := storage.NewBinlogRecordReader(ctx, st.req.InsertLogs, st.req.Schema,
|
rr, err := storage.NewBinlogRecordReader(ctx, st.req.InsertLogs, st.req.Schema,
|
||||||
|
storage.WithCollectionID(st.req.CollectionID),
|
||||||
storage.WithVersion(st.req.StorageVersion),
|
storage.WithVersion(st.req.StorageVersion),
|
||||||
storage.WithDownloader(st.binlogIO.Download),
|
storage.WithDownloader(st.binlogIO.Download),
|
||||||
storage.WithStorageConfig(st.req.GetStorageConfig()),
|
storage.WithStorageConfig(st.req.GetStorageConfig()),
|
||||||
|
|||||||
@ -119,7 +119,7 @@ func (suite *IndexBuildTaskSuite) TestBuildMemoryIndex() {
|
|||||||
err = cm.Write(ctx, suite.dataPath, blobs[0].Value)
|
err = cm.Write(ctx, suite.dataPath, blobs[0].Value)
|
||||||
suite.NoError(err)
|
suite.NoError(err)
|
||||||
|
|
||||||
t := NewIndexBuildTask(ctx, cancel, req, cm, NewTaskManager(context.Background()))
|
t := NewIndexBuildTask(ctx, cancel, req, cm, NewTaskManager(context.Background()), nil)
|
||||||
|
|
||||||
err = t.PreExecute(context.Background())
|
err = t.PreExecute(context.Background())
|
||||||
suite.NoError(err)
|
suite.NoError(err)
|
||||||
|
|||||||
@ -30,8 +30,10 @@ import (
|
|||||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||||
"github.com/milvus-io/milvus/internal/datanode/index"
|
"github.com/milvus-io/milvus/internal/datanode/index"
|
||||||
"github.com/milvus-io/milvus/internal/flushcommon/io"
|
"github.com/milvus-io/milvus/internal/flushcommon/io"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/metrics"
|
"github.com/milvus-io/milvus/pkg/v2/metrics"
|
||||||
|
"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||||
@ -98,7 +100,11 @@ func (node *DataNode) CreateJob(ctx context.Context, req *workerpb.CreateJobRequ
|
|||||||
metrics.DataNodeBuildIndexTaskCounter.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
|
metrics.DataNodeBuildIndexTaskCounter.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
task := index.NewIndexBuildTask(taskCtx, taskCancel, req, cm, node.taskManager)
|
pluginContext, err := ParseCPluginContext(req.GetPluginContext(), req.GetCollectionID())
|
||||||
|
if err != nil {
|
||||||
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
task := index.NewIndexBuildTask(taskCtx, taskCancel, req, cm, node.taskManager, pluginContext)
|
||||||
ret := merr.Success()
|
ret := merr.Success()
|
||||||
if err := node.taskScheduler.TaskQueue.Enqueue(task); err != nil {
|
if err := node.taskScheduler.TaskQueue.Enqueue(task); err != nil {
|
||||||
log.Warn("DataNode failed to schedule",
|
log.Warn("DataNode failed to schedule",
|
||||||
@ -302,7 +308,13 @@ func (node *DataNode) createIndexTask(ctx context.Context, req *workerpb.CreateJ
|
|||||||
metrics.DataNodeBuildIndexTaskCounter.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
|
metrics.DataNodeBuildIndexTaskCounter.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
task := index.NewIndexBuildTask(taskCtx, taskCancel, req, cm, node.taskManager)
|
|
||||||
|
pluginContext, err := ParseCPluginContext(req.GetPluginContext(), req.GetCollectionID())
|
||||||
|
if err != nil {
|
||||||
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
task := index.NewIndexBuildTask(taskCtx, taskCancel, req, cm, node.taskManager, pluginContext)
|
||||||
ret := merr.Success()
|
ret := merr.Success()
|
||||||
if err := node.taskScheduler.TaskQueue.Enqueue(task); err != nil {
|
if err := node.taskScheduler.TaskQueue.Enqueue(task); err != nil {
|
||||||
log.Warn("DataNode failed to schedule",
|
log.Warn("DataNode failed to schedule",
|
||||||
@ -599,3 +611,17 @@ func (node *DataNode) DropJobsV2(ctx context.Context, req *workerpb.DropJobsV2Re
|
|||||||
return merr.Status(errors.New("DataNode receive dropping unknown type jobs")), nil
|
return merr.Status(errors.New("DataNode receive dropping unknown type jobs")), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ParseCPluginContext(context []*commonpb.KeyValuePair, collectionID int64) (*indexcgopb.StoragePluginContext, error) {
|
||||||
|
pluginContext, err := hookutil.CreateLocalEZByPluginContext(context)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if pluginContext != nil {
|
||||||
|
pluginContext.CollectionId = collectionID
|
||||||
|
return pluginContext, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|||||||
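A hedged caller-side sketch of the ParseCPluginContext contract introduced above: a nil plugin context means the collection has no encryption zone, so index builds simply proceed unencrypted (names as in the hunks above):

    pluginContext, err := ParseCPluginContext(req.GetPluginContext(), req.GetCollectionID())
    if err != nil {
        return merr.Status(err), nil // malformed cipher properties in the request
    }
    // pluginContext may be nil here; NewIndexBuildTask treats nil as "no encryption"
    task := index.NewIndexBuildTask(taskCtx, taskCancel, req, cm, node.taskManager, pluginContext)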
@ -88,7 +88,7 @@ type IndexServiceSuite struct {
|
|||||||
cm storage.ChunkManager
|
cm storage.ChunkManager
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_IndexServiceSuite(t *testing.T) {
|
func TestIndexServiceSuite(t *testing.T) {
|
||||||
suite.Run(t, new(IndexServiceSuite))
|
suite.Run(t, new(IndexServiceSuite))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -32,6 +32,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus/internal/datanode/compactor"
|
"github.com/milvus-io/milvus/internal/datanode/compactor"
|
||||||
"github.com/milvus-io/milvus/internal/datanode/importv2"
|
"github.com/milvus-io/milvus/internal/datanode/importv2"
|
||||||
"github.com/milvus-io/milvus/internal/flushcommon/io"
|
"github.com/milvus-io/milvus/internal/flushcommon/io"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/importutilv2"
|
"github.com/milvus-io/milvus/internal/util/importutilv2"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
@ -535,43 +536,52 @@ func (node *DataNode) CreateTask(ctx context.Context, request *workerpb.CreateTa
|
|||||||
switch taskType {
|
switch taskType {
|
||||||
case taskcommon.PreImport:
|
case taskcommon.PreImport:
|
||||||
req := &datapb.PreImportRequest{}
|
req := &datapb.PreImportRequest{}
|
||||||
err := proto.Unmarshal(request.GetPayload(), req)
|
if err := proto.Unmarshal(request.GetPayload(), req); err != nil {
|
||||||
if err != nil {
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
if _, err := hookutil.CreateLocalEZByPluginContext(req.GetPluginContext()); err != nil {
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
return node.PreImport(ctx, req)
|
return node.PreImport(ctx, req)
|
||||||
case taskcommon.Import:
|
case taskcommon.Import:
|
||||||
req := &datapb.ImportRequest{}
|
req := &datapb.ImportRequest{}
|
||||||
err := proto.Unmarshal(request.GetPayload(), req)
|
if err := proto.Unmarshal(request.GetPayload(), req); err != nil {
|
||||||
if err != nil {
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
if _, err := hookutil.CreateLocalEZByPluginContext(req.GetPluginContext()); err != nil {
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
return node.ImportV2(ctx, req)
|
return node.ImportV2(ctx, req)
|
||||||
case taskcommon.Compaction:
|
case taskcommon.Compaction:
|
||||||
req := &datapb.CompactionPlan{}
|
req := &datapb.CompactionPlan{}
|
||||||
err := proto.Unmarshal(request.GetPayload(), req)
|
if err := proto.Unmarshal(request.GetPayload(), req); err != nil {
|
||||||
if err != nil {
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
if _, err := hookutil.CreateLocalEZByPluginContext(req.GetPluginContext()); err != nil {
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
return node.CompactionV2(ctx, req)
|
return node.CompactionV2(ctx, req)
|
||||||
case taskcommon.Index:
|
case taskcommon.Index:
|
||||||
req := &workerpb.CreateJobRequest{}
|
req := &workerpb.CreateJobRequest{}
|
||||||
err := proto.Unmarshal(request.GetPayload(), req)
|
if err := proto.Unmarshal(request.GetPayload(), req); err != nil {
|
||||||
if err != nil {
|
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
return node.createIndexTask(ctx, req)
|
return node.createIndexTask(ctx, req)
|
||||||
case taskcommon.Stats:
|
case taskcommon.Stats:
|
||||||
req := &workerpb.CreateStatsRequest{}
|
req := &workerpb.CreateStatsRequest{}
|
||||||
err := proto.Unmarshal(request.GetPayload(), req)
|
if err := proto.Unmarshal(request.GetPayload(), req); err != nil {
|
||||||
if err != nil {
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
if _, err := hookutil.CreateLocalEZByPluginContext(req.GetPluginContext()); err != nil {
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
return node.createStatsTask(ctx, req)
|
return node.createStatsTask(ctx, req)
|
||||||
case taskcommon.Analyze:
|
case taskcommon.Analyze:
|
||||||
req := &workerpb.AnalyzeRequest{}
|
req := &workerpb.AnalyzeRequest{}
|
||||||
err := proto.Unmarshal(request.GetPayload(), req)
|
if err := proto.Unmarshal(request.GetPayload(), req); err != nil {
|
||||||
if err != nil {
|
return merr.Status(err), nil
|
||||||
|
}
|
||||||
|
if _, err := hookutil.CreateLocalEZByPluginContext(req.GetPluginContext()); err != nil {
|
||||||
return merr.Status(err), nil
|
return merr.Status(err), nil
|
||||||
}
|
}
|
||||||
return node.createAnalyzeTask(ctx, req)
|
return node.createAnalyzeTask(ctx, req)
|
||||||
|
|||||||
@ -29,9 +29,11 @@ import (
|
|||||||
"github.com/milvus-io/milvus/internal/flushcommon/metacache"
|
"github.com/milvus-io/milvus/internal/flushcommon/metacache"
|
||||||
"github.com/milvus-io/milvus/internal/storage"
|
"github.com/milvus-io/milvus/internal/storage"
|
||||||
"github.com/milvus-io/milvus/internal/storagecommon"
|
"github.com/milvus-io/milvus/internal/storagecommon"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
||||||
|
"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
|
"github.com/milvus-io/milvus/pkg/v2/util/metautil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||||
@ -147,7 +149,23 @@ func (bw *BulkPackWriterV2) writeInserts(ctx context.Context, pack *SyncPack) (m
|
|||||||
|
|
||||||
bucketName := paramtable.Get().ServiceParam.MinioCfg.BucketName.GetValue()
|
bucketName := paramtable.Get().ServiceParam.MinioCfg.BucketName.GetValue()
|
||||||
|
|
||||||
w, err := storage.NewPackedRecordWriter(bucketName, paths, bw.schema, bw.bufferSize, bw.multiPartUploadSize, columnGroups, bw.storageConfig)
|
var pluginContextPtr *indexcgopb.StoragePluginContext
|
||||||
|
if hookutil.IsClusterEncyptionEnabled() {
|
||||||
|
ez := hookutil.GetEzByCollProperties(bw.schema.GetProperties(), pack.collectionID)
|
||||||
|
if ez != nil {
|
||||||
|
unsafe := hookutil.GetCipher().GetUnsafeKey(ez.EzID, ez.CollectionID)
|
||||||
|
if len(unsafe) > 0 {
|
||||||
|
pluginContext := indexcgopb.StoragePluginContext{
|
||||||
|
EncryptionZoneId: ez.EzID,
|
||||||
|
CollectionId: ez.CollectionID,
|
||||||
|
EncryptionKey: string(unsafe),
|
||||||
|
}
|
||||||
|
pluginContextPtr = &pluginContext
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
w, err := storage.NewPackedRecordWriter(bucketName, paths, bw.schema, bw.bufferSize, bw.multiPartUploadSize, columnGroups, bw.storageConfig, pluginContextPtr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2707,7 +2707,10 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
|
|||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
rsp.Status = merr.Status(err)
|
rsp.Status = merr.Status(err)
|
||||||
}
|
}
|
||||||
return rsp, err
|
if err != nil {
|
||||||
|
rsp.Status = merr.Status(err)
|
||||||
|
}
|
||||||
|
return rsp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (node *Proxy) search(ctx context.Context, request *milvuspb.SearchRequest, optimizedSearch bool, isRecallEvaluation bool) (*milvuspb.SearchResults, bool, bool, bool, error) {
|
func (node *Proxy) search(ctx context.Context, request *milvuspb.SearchRequest, optimizedSearch bool, isRecallEvaluation bool) (*milvuspb.SearchResults, bool, bool, bool, error) {
|
||||||
|
|||||||
@ -41,6 +41,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/metrics"
|
"github.com/milvus-io/milvus/pkg/v2/metrics"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
|
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
|
||||||
|
"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/expr"
|
"github.com/milvus-io/milvus/pkg/v2/util/expr"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/logutil"
|
"github.com/milvus-io/milvus/pkg/v2/util/logutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
|
"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
|
||||||
@ -262,6 +263,10 @@ func (node *Proxy) Init() error {
|
|||||||
uuid.EnableRandPool()
|
uuid.EnableRandPool()
|
||||||
log.Debug("enable rand pool for UUIDv4 generation")
|
log.Debug("enable rand pool for UUIDv4 generation")
|
||||||
|
|
||||||
|
if hookutil.IsClusterEncyptionEnabled() {
|
||||||
|
message.RegisterCipher(hookutil.GetCipher())
|
||||||
|
}
|
||||||
|
|
||||||
log.Info("init proxy done", zap.Int64("nodeID", paramtable.GetNodeID()), zap.String("Address", node.address))
|
log.Info("init proxy done", zap.Int64("nodeID", paramtable.GetNodeID()), zap.String("Address", node.address))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -59,8 +59,7 @@ func MergeMetaSegmentIntoSegmentInfo(info *querypb.SegmentInfo, segments ...*met
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// packSegmentLoadInfo packs SegmentLoadInfo for given segment,
|
// packSegmentLoadInfo packs SegmentLoadInfo for given segment
|
||||||
// packs with index if withIndex is true, this fetch indexes from IndexCoord
|
|
||||||
func PackSegmentLoadInfo(segment *datapb.SegmentInfo, channelCheckpoint *msgpb.MsgPosition, indexes []*querypb.FieldIndexInfo) *querypb.SegmentLoadInfo {
|
func PackSegmentLoadInfo(segment *datapb.SegmentInfo, channelCheckpoint *msgpb.MsgPosition, indexes []*querypb.FieldIndexInfo) *querypb.SegmentLoadInfo {
|
||||||
posTime := tsoutil.PhysicalTime(channelCheckpoint.GetTimestamp())
|
posTime := tsoutil.PhysicalTime(channelCheckpoint.GetTimestamp())
|
||||||
tsLag := time.Since(posTime)
|
tsLag := time.Since(posTime)
|
||||||
|
|||||||
@ -26,6 +26,7 @@ import (
|
|||||||
|
|
||||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||||
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/segcore"
|
"github.com/milvus-io/milvus/internal/util/segcore"
|
||||||
"github.com/milvus-io/milvus/internal/util/vecindexmgr"
|
"github.com/milvus-io/milvus/internal/util/vecindexmgr"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
@ -90,7 +91,6 @@ func (m *collectionManager) Get(collectionID int64) *Collection {
|
|||||||
func (m *collectionManager) PutOrRef(collectionID int64, schema *schemapb.CollectionSchema, meta *segcorepb.CollectionIndexMeta, loadMeta *querypb.LoadMetaInfo) error {
|
func (m *collectionManager) PutOrRef(collectionID int64, schema *schemapb.CollectionSchema, meta *segcorepb.CollectionIndexMeta, loadMeta *querypb.LoadMetaInfo) error {
|
||||||
m.mut.Lock()
|
m.mut.Lock()
|
||||||
defer m.mut.Unlock()
|
defer m.mut.Unlock()
|
||||||
|
|
||||||
if collection, ok := m.collections[collectionID]; ok {
|
if collection, ok := m.collections[collectionID]; ok {
|
||||||
if loadMeta.GetSchemaVersion() > collection.schemaVersion {
|
if loadMeta.GetSchemaVersion() > collection.schemaVersion {
|
||||||
// the schema may be changed even the collection is loaded
|
// the schema may be changed even the collection is loaded
|
||||||
@ -112,6 +112,7 @@ func (m *collectionManager) PutOrRef(collectionID int64, schema *schemapb.Collec
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
collection.Ref(1)
|
collection.Ref(1)
|
||||||
m.collections[collectionID] = collection
|
m.collections[collectionID] = collection
|
||||||
m.updateMetric()
|
m.updateMetric()
|
||||||
@ -160,7 +161,6 @@ func (m *collectionManager) Unref(collectionID int64, count uint32) bool {
|
|||||||
zap.Int64("nodeID", paramtable.GetNodeID()), zap.Int64("collectionID", collectionID))
|
zap.Int64("nodeID", paramtable.GetNodeID()), zap.Int64("collectionID", collectionID))
|
||||||
delete(m.collections, collectionID)
|
delete(m.collections, collectionID)
|
||||||
DeleteCollection(collection)
|
DeleteCollection(collection)
|
||||||
|
|
||||||
metrics.CleanupQueryNodeCollectionMetrics(paramtable.GetNodeID(), collectionID)
|
metrics.CleanupQueryNodeCollectionMetrics(paramtable.GetNodeID(), collectionID)
|
||||||
m.updateMetric()
|
m.updateMetric()
|
||||||
return true
|
return true
|
||||||
@ -264,6 +264,7 @@ func (c *Collection) Ref(count uint32) uint32 {
|
|||||||
zap.Int64("collectionID", c.ID()),
|
zap.Int64("collectionID", c.ID()),
|
||||||
zap.Uint32("refCount", refCount),
|
zap.Uint32("refCount", refCount),
|
||||||
)
|
)
|
||||||
|
putOrUpdateStorageContext(c.Schema().GetProperties(), c.ID())
|
||||||
return refCount
|
return refCount
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -374,9 +375,31 @@ func DeleteCollection(collection *Collection) {
|
|||||||
collection.mu.Lock()
|
collection.mu.Lock()
|
||||||
defer collection.mu.Unlock()
|
defer collection.mu.Unlock()
|
||||||
|
|
||||||
|
if hookutil.IsClusterEncyptionEnabled() {
|
||||||
|
ez := hookutil.GetEzByCollProperties(collection.Schema().GetProperties(), collection.ID())
|
||||||
|
if ez != nil {
|
||||||
|
if err := segcore.UnRefPluginContext(ez); err != nil {
|
||||||
|
log.Error("failed to unref plugin context", zap.Int64("collectionID", collection.ID()), zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if collection.ccollection == nil {
|
if collection.ccollection == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
collection.ccollection.Release()
|
collection.ccollection.Release()
|
||||||
collection.ccollection = nil
|
collection.ccollection = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func putOrUpdateStorageContext(properties []*commonpb.KeyValuePair, collectionID int64) {
|
||||||
|
if hookutil.IsClusterEncyptionEnabled() {
|
||||||
|
ez := hookutil.GetEzByCollProperties(properties, collectionID)
|
||||||
|
if ez != nil {
|
||||||
|
key := hookutil.GetCipher().GetUnsafeKey(ez.EzID, ez.CollectionID)
|
||||||
|
err := segcore.PutOrRefPluginContext(ez, string(key))
|
||||||
|
if err != nil {
|
||||||
|
log.Error("failed to put or update plugin context", zap.Int64("collectionID", collectionID), zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@ -271,7 +271,6 @@ func (loader *segmentLoader) Load(ctx context.Context,
|
|||||||
log.Warn("failed to get collection", zap.Error(err))
|
log.Warn("failed to get collection", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter out loaded & loading segments
|
// Filter out loaded & loading segments
|
||||||
infos := loader.prepare(ctx, segmentType, segments...)
|
infos := loader.prepare(ctx, segmentType, segments...)
|
||||||
defer loader.unregister(infos...)
|
defer loader.unregister(infos...)
|
||||||
|
|||||||
@ -445,7 +445,7 @@ func (node *QueryNode) InitSegcore() error {
|
|||||||
|
|
||||||
// init paramtable change callback for core related config
|
// init paramtable change callback for core related config
|
||||||
initcore.SetupCoreConfigChangelCallback()
|
initcore.SetupCoreConfigChangelCallback()
|
||||||
return nil
|
return initcore.InitPluginLoader()
|
||||||
}
|
}
|
||||||
|
|
||||||
func getIndexEngineVersion() (minimal, current int32) {
|
func getIndexEngineVersion() (minimal, current int32) {
|
||||||
|
|||||||
@ -28,6 +28,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||||
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
||||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/proxyutil"
|
"github.com/milvus-io/milvus/internal/util/proxyutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
@ -50,26 +51,35 @@ func (a *alterCollectionTask) Prepare(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (a *alterCollectionTask) Execute(ctx context.Context) error {
|
func (a *alterCollectionTask) Execute(ctx context.Context) error {
|
||||||
// Now we only support alter properties of collection
|
log := log.Ctx(ctx).With(
|
||||||
|
zap.String("alterCollectionTask", a.Req.GetCollectionName()),
|
||||||
|
zap.Int64("collectionID", a.Req.GetCollectionID()),
|
||||||
|
zap.Uint64("ts", a.GetTs()))
|
||||||
|
|
||||||
if a.Req.GetProperties() == nil && a.Req.GetDeleteKeys() == nil {
|
if a.Req.GetProperties() == nil && a.Req.GetDeleteKeys() == nil {
|
||||||
return errors.New("The collection properties to alter and keys to delete must not be empty at the same time")
|
log.Warn("alter collection with empty properties and delete keys, expected to set either properties or delete keys ")
|
||||||
|
return errors.New("alter collection with empty properties and delete keys, expect to set either properties or delete keys ")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(a.Req.GetProperties()) > 0 && len(a.Req.GetDeleteKeys()) > 0 {
|
if len(a.Req.GetProperties()) > 0 && len(a.Req.GetDeleteKeys()) > 0 {
|
||||||
return errors.New("can not provide properties and deletekeys at the same time")
|
return errors.New("alter collection cannot provide properties and delete keys at the same time")
|
||||||
}
|
}
|
||||||
|
|
||||||
oldColl, err := a.core.meta.GetCollectionByName(ctx, a.Req.GetDbName(), a.Req.GetCollectionName(), a.ts)
|
if hookutil.ContainsCipherProperties(a.Req.GetProperties(), a.Req.GetDeleteKeys()) {
|
||||||
|
log.Info("skip to alter collection due to cipher properties were detected in the properties")
|
||||||
|
return errors.New("can not alter cipher related properties")
|
||||||
|
}
|
||||||
|
|
||||||
|
oldColl, err := a.core.meta.GetCollectionByName(ctx, a.Req.GetDbName(), a.Req.GetCollectionName(), a.GetTs())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Ctx(ctx).Warn("get collection failed during changing collection state",
|
log.Warn("get collection failed during changing collection state", zap.Error(err))
|
||||||
zap.String("collectionName", a.Req.GetCollectionName()), zap.Uint64("ts", a.ts))
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var newProperties []*commonpb.KeyValuePair
|
var newProperties []*commonpb.KeyValuePair
|
||||||
if len(a.Req.Properties) > 0 {
|
if len(a.Req.Properties) > 0 {
|
||||||
if ContainsKeyPairArray(a.Req.GetProperties(), oldColl.Properties) {
|
if IsSubsetOfProperties(a.Req.GetProperties(), oldColl.Properties) {
|
||||||
log.Info("skip to alter collection due to no changes were detected in the properties", zap.Int64("collectionID", oldColl.CollectionID))
|
log.Info("skip to alter collection due to no changes were detected in the properties")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
newProperties = MergeProperties(oldColl.Properties, a.Req.GetProperties())
|
newProperties = MergeProperties(oldColl.Properties, a.Req.GetProperties())
|
||||||
@ -77,8 +87,7 @@ func (a *alterCollectionTask) Execute(ctx context.Context) error {
|
|||||||
newProperties = DeleteProperties(oldColl.Properties, a.Req.GetDeleteKeys())
|
newProperties = DeleteProperties(oldColl.Properties, a.Req.GetDeleteKeys())
|
||||||
}
|
}
|
||||||
|
|
||||||
ts := a.GetTs()
|
return executeAlterCollectionTaskSteps(ctx, a.core, oldColl, oldColl.Properties, newProperties, a.Req, a.GetTs())
|
||||||
return executeAlterCollectionTaskSteps(ctx, a.core, oldColl, oldColl.Properties, newProperties, a.Req, ts)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *alterCollectionTask) GetLockerKey() LockerKey {
|
func (a *alterCollectionTask) GetLockerKey() LockerKey {
|
||||||
|
|||||||
@ -26,6 +26,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||||
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
||||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/proxyutil"
|
"github.com/milvus-io/milvus/internal/util/proxyutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
@ -63,26 +64,35 @@ func (a *alterDatabaseTask) Prepare(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (a *alterDatabaseTask) Execute(ctx context.Context) error {
|
func (a *alterDatabaseTask) Execute(ctx context.Context) error {
|
||||||
// Now we support alter and delete properties of database
|
log := log.Ctx(ctx).With(
|
||||||
|
zap.String("alterDatabaseTask", a.Req.GetDbName()),
|
||||||
|
zap.String("db", a.Req.GetDbId()),
|
||||||
|
zap.Uint64("ts", a.GetTs()))
|
||||||
|
|
||||||
if a.Req.GetProperties() == nil && a.Req.GetDeleteKeys() == nil {
|
if a.Req.GetProperties() == nil && a.Req.GetDeleteKeys() == nil {
|
||||||
return errors.New("alter database requires either properties or deletekeys to modify or delete keys, both cannot be empty")
|
log.Warn("alter database with empty properties and delete keys, expected to set either properties or delete keys ")
|
||||||
|
return errors.New("alter database with empty properties and delete keys, expected to set either properties or delete keys")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(a.Req.GetProperties()) > 0 && len(a.Req.GetDeleteKeys()) > 0 {
|
if len(a.Req.GetProperties()) > 0 && len(a.Req.GetDeleteKeys()) > 0 {
|
||||||
return errors.New("alter database operation cannot modify properties and delete keys at the same time")
|
return errors.New("alter database cannot modify properties and delete keys at the same time")
|
||||||
}
|
}
|
||||||
|
|
||||||
oldDB, err := a.core.meta.GetDatabaseByName(ctx, a.Req.GetDbName(), a.ts)
|
if hookutil.ContainsCipherProperties(a.Req.GetProperties(), a.Req.GetDeleteKeys()) {
|
||||||
|
log.Info("skip to alter collection due to cipher properties were detected in the request properties")
|
||||||
|
return errors.New("can not alter cipher related properties")
|
||||||
|
}
|
||||||
|
|
||||||
|
oldDB, err := a.core.meta.GetDatabaseByName(ctx, a.Req.GetDbName(), a.GetTs())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Ctx(ctx).Warn("get database failed during changing database props",
|
log.Warn("get database failed during changing database props")
|
||||||
zap.String("databaseName", a.Req.GetDbName()), zap.Uint64("ts", a.ts))
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var newProperties []*commonpb.KeyValuePair
|
var newProperties []*commonpb.KeyValuePair
|
||||||
if (len(a.Req.GetProperties())) > 0 {
|
if (len(a.Req.GetProperties())) > 0 {
|
||||||
if ContainsKeyPairArray(a.Req.GetProperties(), oldDB.Properties) {
|
if IsSubsetOfProperties(a.Req.GetProperties(), oldDB.Properties) {
|
||||||
log.Info("skip to alter database due to no changes were detected in the properties", zap.String("databaseName", a.Req.GetDbName()))
|
log.Info("skip to alter database due to no changes were detected in the properties")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
newProperties = MergeProperties(oldDB.Properties, a.Req.GetProperties())
|
newProperties = MergeProperties(oldDB.Properties, a.Req.GetProperties())
|
||||||
@ -90,7 +100,7 @@ func (a *alterDatabaseTask) Execute(ctx context.Context) error {
|
|||||||
newProperties = DeleteProperties(oldDB.Properties, a.Req.GetDeleteKeys())
|
newProperties = DeleteProperties(oldDB.Properties, a.Req.GetDeleteKeys())
|
||||||
}
|
}
|
||||||
|
|
||||||
return executeAlterDatabaseTaskSteps(ctx, a.core, oldDB, oldDB.Properties, newProperties, a.ts)
|
return executeAlterDatabaseTaskSteps(ctx, a.core, oldDB, oldDB.Properties, newProperties, a.GetTs())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *alterDatabaseTask) GetLockerKey() LockerKey {
|
func (a *alterDatabaseTask) GetLockerKey() LockerKey {
|
||||||
@ -100,7 +110,7 @@ func (a *alterDatabaseTask) GetLockerKey() LockerKey {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func MergeProperties(oldProps []*commonpb.KeyValuePair, updatedProps []*commonpb.KeyValuePair) []*commonpb.KeyValuePair {
|
func MergeProperties(oldProps, updatedProps []*commonpb.KeyValuePair) []*commonpb.KeyValuePair {
|
||||||
_, existEndTS := common.GetReplicateEndTS(updatedProps)
|
_, existEndTS := common.GetReplicateEndTS(updatedProps)
|
||||||
if existEndTS {
|
if existEndTS {
|
||||||
updatedProps = append(updatedProps, &commonpb.KeyValuePair{
|
updatedProps = append(updatedProps, &commonpb.KeyValuePair{
|
||||||
|
|||||||
@ -32,6 +32,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus/internal/coordinator/snmanager"
|
"github.com/milvus-io/milvus/internal/coordinator/snmanager"
|
||||||
"github.com/milvus-io/milvus/internal/distributed/streaming"
|
"github.com/milvus-io/milvus/internal/distributed/streaming"
|
||||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/proxyutil"
|
"github.com/milvus-io/milvus/internal/util/proxyutil"
|
||||||
"github.com/milvus-io/milvus/internal/util/streamingutil"
|
"github.com/milvus-io/milvus/internal/util/streamingutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
@ -282,6 +283,9 @@ func (t *createCollectionTask) prepareSchema(ctx context.Context) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set properties for persistent
|
||||||
|
schema.Properties = t.Req.GetProperties()
|
||||||
|
|
||||||
t.appendSysFields(&schema)
|
t.appendSysFields(&schema)
|
||||||
t.schema = &schema
|
t.schema = &schema
|
||||||
return nil
|
return nil
|
||||||
@ -380,6 +384,10 @@ func (t *createCollectionTask) Prepare(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
t.dbProperties = db.Properties
|
t.dbProperties = db.Properties
|
||||||
|
|
||||||
|
if hookutil.GetEzPropByDBProperties(t.dbProperties) != nil {
|
||||||
|
t.Req.Properties = append(t.Req.Properties, hookutil.GetEzPropByDBProperties(t.dbProperties))
|
||||||
|
}
|
||||||
|
|
||||||
if err := t.validate(ctx); err != nil {
|
if err := t.validate(ctx); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@ -21,6 +21,7 @@ import (
|
|||||||
|
|
||||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/proto/etcdpb"
|
"github.com/milvus-io/milvus/pkg/v2/proto/etcdpb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||||
)
|
)
|
||||||
@ -46,6 +47,14 @@ func (t *createDatabaseTask) Prepare(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Use dbID as ezID because the dbID is unqiue
|
||||||
|
properties, err := hookutil.TidyDBCipherProperties(t.dbID, t.Req.Properties)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Req.Properties = properties
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -32,6 +32,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||||
"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer/channel"
|
"github.com/milvus-io/milvus/internal/streamingcoord/server/balancer/channel"
|
||||||
"github.com/milvus-io/milvus/internal/tso"
|
"github.com/milvus-io/milvus/internal/tso"
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/metrics"
|
"github.com/milvus-io/milvus/pkg/v2/metrics"
|
||||||
@ -42,6 +43,7 @@ import (
|
|||||||
"github.com/milvus-io/milvus/pkg/v2/util/contextutil"
|
"github.com/milvus-io/milvus/pkg/v2/util/contextutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
|
"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||||
|
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
|
"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||||
)
|
)
|
||||||
@ -299,6 +301,19 @@ func (mt *MetaTable) createDefaultDb() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defaultRootKey := paramtable.GetCipherParams().DefaultRootKey.GetValue()
|
||||||
|
if hookutil.IsClusterEncyptionEnabled() && len(defaultRootKey) > 0 {
|
||||||
|
// Set unique ID as ezID because the default dbID for each cluster
|
||||||
|
// is the same
|
||||||
|
ezID, err := mt.tsoAllocator.GenerateTSO(1)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
cipherProps := hookutil.GetDBCipherProperties(ezID, defaultRootKey)
|
||||||
|
defaultProperties = append(defaultProperties, cipherProps...)
|
||||||
|
}
|
||||||
|
|
||||||
return mt.createDatabasePrivate(mt.ctx, model.NewDefaultDatabase(defaultProperties), ts)
|
return mt.createDatabasePrivate(mt.ctx, model.NewDefaultDatabase(defaultProperties), ts)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -323,6 +338,11 @@ func (mt *MetaTable) createDatabasePrivate(ctx context.Context, db *model.Databa
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Call back cipher plugin when creating database succeeded
|
||||||
|
if err := hookutil.CreateEZByDBProperties(db.Properties); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
mt.names.createDbIfNotExist(dbName)
|
mt.names.createDbIfNotExist(dbName)
|
||||||
mt.aliases.createDbIfNotExist(dbName)
|
mt.aliases.createDbIfNotExist(dbName)
|
||||||
mt.dbName2Meta[dbName] = db
|
mt.dbName2Meta[dbName] = db
|
||||||
@ -374,6 +394,11 @@ func (mt *MetaTable) DropDatabase(ctx context.Context, dbName string, ts typeuti
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Call back cipher plugin when dropping database succeeded
|
||||||
|
if err := hookutil.RemoveEZByDBProperties(db.Properties); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
mt.names.dropDb(dbName)
|
mt.names.dropDb(dbName)
|
||||||
mt.aliases.dropDb(dbName)
|
mt.aliases.dropDb(dbName)
|
||||||
delete(mt.dbName2Meta, dbName)
|
delete(mt.dbName2Meta, dbName)
|
||||||
|
|||||||
@ -42,33 +42,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// EqualKeyPairArray check whether 2 KeyValuePairs are equal
|
// EqualKeyPairArray check whether 2 KeyValuePairs are equal
|
||||||
func EqualKeyPairArray(p1 []*commonpb.KeyValuePair, p2 []*commonpb.KeyValuePair) bool {
|
func IsSubsetOfProperties(src, target []*commonpb.KeyValuePair) bool {
|
||||||
if len(p1) != len(p2) {
|
tmpMap := make(map[string]string)
|
||||||
return false
|
|
||||||
}
|
|
||||||
m1 := make(map[string]string)
|
|
||||||
for _, p := range p1 {
|
|
||||||
m1[p.Key] = p.Value
|
|
||||||
}
|
|
||||||
for _, p := range p2 {
|
|
||||||
val, ok := m1[p.Key]
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if val != p.Value {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ContainsKeyPairArray(p1, p2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func ContainsKeyPairArray(src []*commonpb.KeyValuePair, target []*commonpb.KeyValuePair) bool {
|
|
||||||
m1 := make(map[string]string)
|
|
||||||
for _, p := range target {
|
for _, p := range target {
|
||||||
m1[p.Key] = p.Value
|
tmpMap[p.Key] = p.Value
|
||||||
}
|
}
|
||||||
for _, p := range src {
|
for _, p := range src {
|
||||||
val, ok := m1[p.Key]
|
// new key value in src
|
||||||
|
val, ok := tmpMap[p.Key]
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@ -28,39 +28,6 @@ import (
|
|||||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_EqualKeyPairArray(t *testing.T) {
|
|
||||||
p1 := []*commonpb.KeyValuePair{
|
|
||||||
{
|
|
||||||
Key: "k1",
|
|
||||||
Value: "v1",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
p2 := []*commonpb.KeyValuePair{}
|
|
||||||
assert.False(t, EqualKeyPairArray(p1, p2))
|
|
||||||
|
|
||||||
p2 = append(p2, &commonpb.KeyValuePair{
|
|
||||||
Key: "k2",
|
|
||||||
Value: "v2",
|
|
||||||
})
|
|
||||||
assert.False(t, EqualKeyPairArray(p1, p2))
|
|
||||||
p2 = []*commonpb.KeyValuePair{
|
|
||||||
{
|
|
||||||
Key: "k1",
|
|
||||||
Value: "v2",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.False(t, EqualKeyPairArray(p1, p2))
|
|
||||||
|
|
||||||
p2 = []*commonpb.KeyValuePair{
|
|
||||||
{
|
|
||||||
Key: "k1",
|
|
||||||
Value: "v1",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.True(t, EqualKeyPairArray(p1, p2))
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_EncodeMsgPositions(t *testing.T) {
|
func Test_EncodeMsgPositions(t *testing.T) {
|
||||||
mp := &msgstream.MsgPosition{
|
mp := &msgstream.MsgPosition{
|
||||||
ChannelName: "test",
|
ChannelName: "test",
|
||||||
@ -316,3 +283,173 @@ func TestGetRateLimitConfigErr(t *testing.T) {
|
|||||||
assert.EqualValues(t, 100, v)
|
assert.EqualValues(t, 100, v)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIsSubsetOfProperties(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
src []*commonpb.KeyValuePair
|
||||||
|
target []*commonpb.KeyValuePair
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty src and empty target",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{},
|
||||||
|
target: []*commonpb.KeyValuePair{},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty src with non-empty target",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "non-empty src with empty target",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{},
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "src is subset of target - single pair",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "src is subset of target - multiple pairs",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key3", Value: "value3"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
{Key: "key3", Value: "value3"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "src equals target",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "src key not in target",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key_missing", Value: "value_missing"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "src key exists but value differs",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "different_value"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicate keys in src - all match target",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicate keys in target - src subset",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty string values",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: ""},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: ""},
|
||||||
|
{Key: "key2", Value: "value2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty string value mismatch",
|
||||||
|
args: args{
|
||||||
|
src: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: ""},
|
||||||
|
},
|
||||||
|
target: []*commonpb.KeyValuePair{
|
||||||
|
{Key: "key1", Value: "value1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got := IsSubsetOfProperties(tt.args.src, tt.args.target)
|
||||||
|
assert.Equal(t, tt.want, got)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@ -23,8 +23,11 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/cockroachdb/errors"
|
"github.com/cockroachdb/errors"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
|
||||||
|
"github.com/milvus-io/milvus/internal/util/hookutil"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BinlogReader is an object to read binlog file. Binlog file's format can be
|
// BinlogReader is an object to read binlog file. Binlog file's format can be
|
||||||
@ -60,21 +63,6 @@ func (reader *BinlogReader) NextEventReader() (*EventReader, error) {
|
|||||||
return reader.eventReader, nil
|
return reader.eventReader, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (reader *BinlogReader) readMagicNumber() (int32, error) {
|
|
||||||
var err error
|
|
||||||
reader.magicNumber, err = readMagicNumber(reader.buffer)
|
|
||||||
return reader.magicNumber, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (reader *BinlogReader) readDescriptorEvent() (*descriptorEvent, error) {
|
|
||||||
event, err := ReadDescriptorEvent(reader.buffer)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
reader.descriptorEvent = *event
|
|
||||||
return &reader.descriptorEvent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readMagicNumber(buffer io.Reader) (int32, error) {
|
func readMagicNumber(buffer io.Reader) (int32, error) {
|
||||||
var magicNumber int32
|
var magicNumber int32
|
||||||
if err := binary.Read(buffer, common.Endian, &magicNumber); err != nil {
|
if err := binary.Read(buffer, common.Endian, &magicNumber); err != nil {
|
||||||
@ -97,6 +85,7 @@ func ReadDescriptorEvent(buffer io.Reader) (*descriptorEvent, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &descriptorEvent{
|
return &descriptorEvent{
|
||||||
descriptorEventHeader: *header,
|
descriptorEventHeader: *header,
|
||||||
descriptorEventData: *data,
|
descriptorEventData: *data,
|
||||||
@ -118,18 +107,70 @@ func (reader *BinlogReader) Close() {
|
|||||||
reader.isClose = true
|
reader.isClose = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBinlogReader creates binlogReader to read binlog file.
|
type BinlogReaderOption func(base *BinlogReader) error
|
||||||
func NewBinlogReader(data []byte) (*BinlogReader, error) {
|
|
||||||
reader := &BinlogReader{
|
func WithReaderDecryptionContext(ezID, collectionID int64) BinlogReaderOption {
|
||||||
buffer: bytes.NewBuffer(data),
|
return func(base *BinlogReader) error {
|
||||||
isClose: false,
|
edek, ok := base.descriptorEvent.GetEdek()
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := reader.readMagicNumber(); err != nil {
|
decryptor, err := hookutil.GetCipher().GetDecryptor(ezID, collectionID, []byte(edek))
|
||||||
return nil, err
|
if err != nil {
|
||||||
|
log.Error("failed to get decryptor", zap.Int64("ezID", ezID), zap.Int64("collectionID", collectionID), zap.Error(err))
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
if _, err := reader.readDescriptorEvent(); err != nil {
|
|
||||||
return nil, err
|
cipherText := make([]byte, base.buffer.Len())
|
||||||
|
if err := binary.Read(base.buffer, common.Endian, cipherText); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("Binlog reader starts to decypt cipher text",
|
||||||
|
zap.Int64("collectionID", collectionID),
|
||||||
|
zap.Int64("fieldID", base.descriptorEvent.FieldID),
|
||||||
|
zap.Int("cipher size", len(cipherText)),
|
||||||
|
)
|
||||||
|
decrypted, err := decryptor.Decrypt(cipherText)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("failed to decrypt", zap.Int64("ezID", ezID), zap.Int64("collectionID", collectionID), zap.Error(err))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Debug("Binlog reader decrypted cipher text",
|
||||||
|
zap.Int64("collectionID", collectionID),
|
||||||
|
zap.Int64("fieldID", base.descriptorEvent.FieldID),
|
||||||
|
zap.Int("cipher size", len(cipherText)),
|
||||||
|
zap.Int("plain size", len(decrypted)),
|
||||||
|
)
|
||||||
|
base.buffer = bytes.NewBuffer(decrypted)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
return reader, nil
|
}
|
||||||
|
|
||||||
|
// NewBinlogReader creates binlogReader to read binlog file.
|
||||||
|
func NewBinlogReader(data []byte, opts ...BinlogReaderOption) (*BinlogReader, error) {
|
||||||
|
buffer := bytes.NewBuffer(data)
|
||||||
|
if _, err := readMagicNumber(buffer); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
descriptor, err := ReadDescriptorEvent(buffer)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := BinlogReader{
|
||||||
|
isClose: false,
|
||||||
|
descriptorEvent: *descriptor,
|
||||||
|
buffer: buffer,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range opts {
|
||||||
|
if err := opt(&reader); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &reader, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -21,9 +21,12 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
|
||||||
"github.com/cockroachdb/errors"
|
"github.com/cockroachdb/errors"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
|
||||||
|
"github.com/milvus-io/milvus-proto/go-api/v2/hook"
|
||||||
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
||||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||||
|
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BinlogType is to distinguish different files saving different data.
|
// BinlogType is to distinguish different files saving different data.
|
||||||
@ -49,13 +52,32 @@ const (
|
|||||||
MagicNumber int32 = 0xfffabc
|
MagicNumber int32 = 0xfffabc
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func (b BinlogType) String() string {
|
||||||
|
switch b {
|
||||||
|
case InsertBinlog:
|
||||||
|
return "InsertBinlog"
|
||||||
|
case DeleteBinlog:
|
||||||
|
return "DeleteBinlog"
|
||||||
|
case DDLBinlog:
|
||||||
|
return "DDLBinlog"
|
||||||
|
case IndexFileBinlog:
|
||||||
|
return "IndexFileBinlog"
|
||||||
|
case StatsBinlog:
|
||||||
|
return "StatsBinlog"
|
||||||
|
case BM25Binlog:
|
||||||
|
return "BM25"
|
||||||
|
}
|
||||||
|
return "BinlogType"
|
||||||
|
}
|
||||||
|
|
||||||
type baseBinlogWriter struct {
|
type baseBinlogWriter struct {
|
||||||
descriptorEvent
|
*descriptorEvent
|
||||||
magicNumber int32
|
magicNumber int32
|
||||||
binlogType BinlogType
|
binlogType BinlogType
|
||||||
eventWriters []EventWriter
|
eventWriters []EventWriter
|
||||||
buffer *bytes.Buffer
|
buffer *bytes.Buffer
|
||||||
length int32
|
length int32
|
||||||
|
encryptor hook.Encryptor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (writer *baseBinlogWriter) isClosed() bool {
|
func (writer *baseBinlogWriter) isClosed() bool {
|
||||||
@ -117,13 +139,17 @@ func (writer *baseBinlogWriter) Finish() error {
|
|||||||
}
|
}
|
||||||
offset += writer.descriptorEvent.GetMemoryUsageInBytes()
|
offset += writer.descriptorEvent.GetMemoryUsageInBytes()
|
||||||
|
|
||||||
|
eventBuffer := writer.buffer
|
||||||
|
if writer.encryptor != nil {
|
||||||
|
eventBuffer = new(bytes.Buffer)
|
||||||
|
}
|
||||||
writer.length = 0
|
writer.length = 0
|
||||||
for _, w := range writer.eventWriters {
|
for _, w := range writer.eventWriters {
|
||||||
w.SetOffset(offset)
|
w.SetOffset(offset)
|
||||||
if err := w.Finish(); err != nil {
|
if err := w.Finish(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := w.Write(writer.buffer); err != nil {
|
if err := w.Write(eventBuffer); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
length, err := w.GetMemoryUsageInBytes()
|
length, err := w.GetMemoryUsageInBytes()
|
||||||
@ -137,6 +163,20 @@ func (writer *baseBinlogWriter) Finish() error {
|
|||||||
}
|
}
|
||||||
writer.length += int32(rows)
|
writer.length += int32(rows)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if writer.encryptor != nil {
|
||||||
|
encrypted, err := writer.encryptor.Encrypt(eventBuffer.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Debug("Binlog writer encrypted plain text",
|
||||||
|
zap.String("writer type", writer.binlogType.String()),
|
||||||
|
zap.Int("plain size", eventBuffer.Len()),
|
||||||
|
zap.Int("cipher size", len(encrypted)))
|
||||||
|
if err := binary.Write(writer.buffer, common.Endian, encrypted); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -203,7 +243,10 @@ func (writer *IndexFileBinlogWriter) NextIndexFileEventWriter() (*indexFileEvent
 }
 
 // NewInsertBinlogWriter creates InsertBinlogWriter to write binlog file.
-func NewInsertBinlogWriter(dataType schemapb.DataType, collectionID, partitionID, segmentID, FieldID int64, nullable bool) *InsertBinlogWriter {
+func NewInsertBinlogWriter(
+	dataType schemapb.DataType, collectionID, partitionID, segmentID, FieldID int64, nullable bool,
+	opts ...BinlogWriterOptions,
+) *InsertBinlogWriter {
 	descriptorEvent := newDescriptorEvent()
 	descriptorEvent.PayloadDataType = dataType
 	descriptorEvent.CollectionID = collectionID
@@ -213,34 +256,58 @@ func NewInsertBinlogWriter(dataType schemapb.DataType, collectionID, partitionID
 	// store nullable in extra for compatible
 	descriptorEvent.AddExtra(nullableKey, nullable)
 
-	w := &InsertBinlogWriter{
-		baseBinlogWriter: baseBinlogWriter{
-			descriptorEvent: *descriptorEvent,
+	baseWriter := baseBinlogWriter{
+		descriptorEvent: descriptorEvent,
 		magicNumber:  MagicNumber,
 		binlogType:   InsertBinlog,
 		eventWriters: make([]EventWriter, 0),
 		buffer:       nil,
-		},
 	}
+
+	for _, opt := range opts {
+		opt(&baseWriter)
+	}
+
+	w := &InsertBinlogWriter{
+		baseBinlogWriter: baseWriter,
+	}
 	return w
 }
 
 // NewDeleteBinlogWriter creates DeleteBinlogWriter to write binlog file.
-func NewDeleteBinlogWriter(dataType schemapb.DataType, collectionID, partitionID, segmentID int64) *DeleteBinlogWriter {
+func NewDeleteBinlogWriter(
+	dataType schemapb.DataType, collectionID, partitionID, segmentID int64,
+	opts ...BinlogWriterOptions,
+) *DeleteBinlogWriter {
 	descriptorEvent := newDescriptorEvent()
 	descriptorEvent.PayloadDataType = dataType
 	descriptorEvent.CollectionID = collectionID
 	descriptorEvent.PartitionID = partitionID
 	descriptorEvent.SegmentID = segmentID
-	w := &DeleteBinlogWriter{
-		baseBinlogWriter: baseBinlogWriter{
-			descriptorEvent: *descriptorEvent,
+	baseWriter := baseBinlogWriter{
+		descriptorEvent: descriptorEvent,
 		magicNumber:  MagicNumber,
-		binlogType:   DeleteBinlog,
+		binlogType:   InsertBinlog,
 		eventWriters: make([]EventWriter, 0),
 		buffer:       nil,
-		},
+	}
+
+	for _, opt := range opts {
+		opt(&baseWriter)
+	}
+	w := &DeleteBinlogWriter{
+		baseBinlogWriter: baseWriter,
 	}
 	return w
 }
 
+type BinlogWriterOptions func(base *baseBinlogWriter)
+
+func WithWriterEncryptionContext(ezID int64, edek []byte, encryptor hook.Encryptor) BinlogWriterOptions {
+	return func(base *baseBinlogWriter) {
+		base.AddExtra(edekKey, string(edek))
+		base.AddExtra(ezIDKey, ezID)
+		base.encryptor = encryptor
+	}
+}
@@ -21,10 +21,78 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
+	"github.com/milvus-io/milvus/pkg/v2/log"
 )
 
+func TestBinlogReaderWriterCipher(t *testing.T) {
+	hookutil.InitTestCipher()
+
+	encryptor, safeKey, err := hookutil.GetCipher().GetEncryptor(1, 1)
+	require.NoError(t, err)
+	require.NotNil(t, encryptor)
+	cypherOpts := WithWriterEncryptionContext(1, safeKey, encryptor)
+
+	binlogWriter := NewInsertBinlogWriter(schemapb.DataType_Int32, 10, 20, 30, 40, false, cypherOpts)
+	binlogWriter.SetEventTimeStamp(1000, 2000)
+
+	eventWriter, err := binlogWriter.NextInsertEventWriter()
+	require.NoError(t, err)
+	err = eventWriter.AddInt32ToPayload([]int32{1, 2, 3}, nil)
+	assert.NoError(t, err)
+	eventWriter.SetEventTimestamp(1000, 2000)
+	nums, err := binlogWriter.GetRowNums()
+	assert.NoError(t, err)
+	assert.EqualValues(t, 3, nums)
+	sizeTotal := 20000000
+	binlogWriter.baseBinlogWriter.descriptorEventData.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))
+	err = binlogWriter.Finish()
+	assert.NoError(t, err)
+
+	storedEdek, ok := binlogWriter.descriptorEvent.GetEdek()
+	assert.True(t, ok)
+	assert.EqualValues(t, safeKey, storedEdek)
+	assert.NoError(t, err)
+	assert.EqualValues(t, 1, binlogWriter.GetEventNums())
+	nums, err = binlogWriter.GetRowNums()
+	assert.NoError(t, err)
+	assert.EqualValues(t, 3, nums)
+
+	buffer, err := binlogWriter.GetBuffer()
+	assert.NoError(t, err)
+	assert.NotEmpty(t, buffer)
+	binlogWriter.Close()
+
+	// Test reader
+	binlogReader, err := NewBinlogReader(buffer, WithReaderDecryptionContext(1, 1))
+	assert.NoError(t, err)
+
+	log.Info("binlogReader", zap.Any("descriptorEvent", binlogReader.descriptorEvent))
+
+	gotsafeKey, ok := binlogReader.descriptorEvent.GetEdek()
+	assert.True(t, ok)
+	assert.EqualValues(t, safeKey, gotsafeKey)
+
+	eventReader, err := binlogReader.NextEventReader()
+	assert.NoError(t, err)
+	_, _, err = eventReader.GetInt8FromPayload()
+	assert.Error(t, err)
+	payload, _, err := eventReader.GetInt32FromPayload()
+	assert.NoError(t, err)
+	assert.EqualValues(t, 3, len(payload))
+	assert.EqualValues(t, 1, payload[0])
+	assert.EqualValues(t, 2, payload[1])
+	assert.EqualValues(t, 3, payload[2])
+
+	reader, err := binlogReader.NextEventReader()
+	assert.NoError(t, err)
+	assert.Nil(t, reader)
+}
+
 func TestBinlogWriterReader(t *testing.T) {
 	binlogWriter := NewInsertBinlogWriter(schemapb.DataType_Int32, 10, 20, 30, 40, false)
 	tp := binlogWriter.GetBinlogType()
@@ -28,6 +28,7 @@ import (
 
 	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 	"github.com/milvus-io/milvus/internal/json"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
 	"github.com/milvus-io/milvus/pkg/v2/common"
 	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/proto/etcdpb"
@@ -255,6 +256,17 @@ func (insertCodec *InsertCodec) Serialize(partitionID UniqueID, segmentID Unique
 		}
 	}
 
+	binlogWriterOpts := []BinlogWriterOptions{}
+	if hookutil.IsClusterEncyptionEnabled() {
+		if ez := hookutil.GetEzByCollProperties(insertCodec.Schema.GetSchema().GetProperties(), insertCodec.Schema.ID); ez != nil {
+			encryptor, safeKey, err := hookutil.GetCipher().GetEncryptor(ez.EzID, ez.CollectionID)
+			if err != nil {
+				return nil, err
+			}
+			binlogWriterOpts = append(binlogWriterOpts, WithWriterEncryptionContext(ez.EzID, safeKey, encryptor))
+		}
+	}
+
 	serializeField := func(field *schemapb.FieldSchema) error {
 		// check insert data contain this field
 		// must be all missing or all exists
@@ -280,19 +292,18 @@ func (insertCodec *InsertCodec) Serialize(partitionID UniqueID, segmentID Unique
 		}
 
 		// encode fields
-		writer = NewInsertBinlogWriter(field.DataType, insertCodec.Schema.ID, partitionID, segmentID, field.FieldID, field.GetNullable())
+		writer = NewInsertBinlogWriter(field.DataType, insertCodec.Schema.ID, partitionID, segmentID, field.FieldID, field.GetNullable(), binlogWriterOpts...)
 
 		// get payload writing configs, including nullable and fallback encoding method
-		opts := []PayloadWriterOptions{WithNullable(field.GetNullable()), WithWriterProps(getFieldWriterProps(field))}
+		payloadWriterOpts := []PayloadWriterOptions{WithNullable(field.GetNullable()), WithWriterProps(getFieldWriterProps(field))}
 
 		if typeutil.IsVectorType(field.DataType) && !typeutil.IsSparseFloatVectorType(field.DataType) {
 			dim, err := typeutil.GetDim(field)
 			if err != nil {
 				return err
 			}
-			opts = append(opts, WithDim(int(dim)))
+			payloadWriterOpts = append(payloadWriterOpts, WithDim(int(dim)))
 		}
-		eventWriter, err := writer.NextInsertEventWriter(opts...)
+		eventWriter, err := writer.NextInsertEventWriter(payloadWriterOpts...)
 		if err != nil {
 			writer.Close()
 			return err
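The gate in the Serialize change above is the collection's cipher properties. A hedged sketch of the key-value shape that GetEzByCollProperties inspects, using the hookutil constants added later in this commit; the "true" and decimal ezID value formats are assumptions, not confirmed by the diff:

import (
	"strconv"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/internal/util/hookutil"
)

// encryptedCollectionProperties is a sketch of the properties a caller might
// attach to an encrypted collection; only the key names are taken from hookutil.
func encryptedCollectionProperties(ezID int64) []*commonpb.KeyValuePair {
	return []*commonpb.KeyValuePair{
		{Key: hookutil.EncryptionEnabledKey, Value: "true"},
		{Key: hookutil.EncryptionEzIDKey, Value: strconv.FormatInt(ezID, 10)},
	}
}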
@@ -32,15 +32,16 @@ import (
 )
 
 const (
+	version         = "version"
 	originalSizeKey = "original_size"
 	nullableKey     = "nullable"
+	edekKey         = "edek"
+	ezIDKey         = "encryption_zone"
+
+	// mark useMultiFieldFormat if there are multi fields in a log file
+	MultiField = "MULTI_FIELD"
 )
 
-const version = "version"
-
-// mark useMultiFieldFormat if there are multi fields in a log file
-const MultiField = "MULTI_FIELD"
-
 type descriptorEventData struct {
 	DescriptorEventDataFixPart
 	ExtraLength int32
@@ -85,6 +86,30 @@ func (data *descriptorEventData) GetNullable() (bool, error) {
 	return nullable, nil
 }
 
+func (data *descriptorEventData) GetEdek() (string, bool) {
+	edek, ok := data.Extras[edekKey]
+	// previous descriptorEventData not store edek
+	if !ok {
+		return "", false
+	}
+
+	// won't be not ok, already checked format when write with FinishExtra
+	edekStr, _ := edek.(string)
+	return edekStr, true
+}
+
+func (data *descriptorEventData) GetEzID() (int64, bool) {
+	ezidInterface, ok := data.Extras[ezIDKey]
+	// previous descriptorEventData not store edek
+	if !ok {
+		return 0, false
+	}
+
+	// won't be not ok, already checked format when write with FinishExtra
+	ezid, _ := ezidInterface.(int64)
+	return ezid, true
+}
+
 // GetMemoryUsageInBytes returns the memory size of DescriptorEventDataFixPart.
 func (data *descriptorEventData) GetMemoryUsageInBytes() int32 {
 	return data.GetEventDataFixPartSize() + int32(binary.Size(data.PostHeaderLengths)) + int32(binary.Size(data.ExtraLength)) + data.ExtraLength
@@ -124,6 +149,21 @@ func (data *descriptorEventData) FinishExtra() error {
 		}
 	}
 
+	edekStored, exist := data.Extras[edekKey]
+	if exist {
+		_, ok := edekStored.(string)
+		if !ok {
+			return merr.WrapErrParameterInvalidMsg(fmt.Sprintf("value of %v must in string format", edekKey))
+		}
+	}
+	ezIDStored, exist := data.Extras[ezIDKey]
+	if exist {
+		_, ok := ezIDStored.(int64)
+		if !ok {
+			return merr.WrapErrParameterInvalidMsg(fmt.Sprintf("value of %v must in int64 format", ezIDKey))
+		}
+	}
+
 	data.ExtraBytes, err = json.Marshal(data.Extras)
 	if err != nil {
 		return err
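GetEdek and GetEzID rely on the same untyped Extras map that FinishExtra validates at write time. A self-contained illustration of that store-validate-read pattern, with names local to the example:

package main

import "fmt"

func main() {
	extras := map[string]interface{}{}
	extras["edek"] = "wrapped-data-encryption-key" // stored as string
	extras["encryption_zone"] = int64(7)           // stored as int64

	// Write-time check, as FinishExtra does before json.Marshal.
	if _, ok := extras["encryption_zone"].(int64); !ok {
		panic("value of encryption_zone must be in int64 format")
	}

	// Read-time access, as GetEzID does after the format was already checked.
	if v, ok := extras["encryption_zone"]; ok {
		fmt.Println("encryption zone:", v.(int64))
	}
}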
@@ -360,7 +360,7 @@ func NewIndexFileBinlogWriter(
 	descriptorEvent.AddExtra("key", key)
 	w := &IndexFileBinlogWriter{
 		baseBinlogWriter: baseBinlogWriter{
-			descriptorEvent: *descriptorEvent,
+			descriptorEvent: descriptorEvent,
 			magicNumber:     MagicNumber,
 			binlogType:      IndexFileBinlog,
 			eventWriters:    make([]EventWriter, 0),
@@ -29,7 +29,10 @@ import (
 	"github.com/milvus-io/milvus/internal/allocator"
 	"github.com/milvus-io/milvus/internal/storagecommon"
 	"github.com/milvus-io/milvus/internal/storagev2/packed"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
+	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
+	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
 	"github.com/milvus-io/milvus/pkg/v2/util/merr"
 	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
@@ -61,6 +64,7 @@ type rwOptions struct {
 	uploader            uploaderFn
 	multiPartUploadSize int64
 	columnGroups        []storagecommon.ColumnGroup
+	collectionID        int64
 	storageConfig       *indexpb.StorageConfig
 	neededFields        typeutil.Set[int64]
 }
@@ -69,6 +73,10 @@ func (o *rwOptions) validate() error {
 	if o.storageConfig == nil {
 		return merr.WrapErrServiceInternal("storage config is nil")
 	}
+	if o.collectionID == 0 {
+		log.Warn("storage config collection id is empty when init BinlogReader")
+		// return merr.WrapErrServiceInternal("storage config collection id is empty")
+	}
 	if o.op == OpWrite && o.uploader == nil {
 		return merr.WrapErrServiceInternal("uploader is nil for writer")
 	}
@@ -101,6 +109,12 @@ func DefaultReaderOptions() *rwOptions {
 	}
 }
 
+func WithCollectionID(collID int64) RwOption {
+	return func(options *rwOptions) {
+		options.collectionID = collID
+	}
+}
+
 func WithVersion(version int64) RwOption {
 	return func(options *rwOptions) {
 		options.version = version
@@ -228,6 +242,23 @@ func NewBinlogRecordReader(ctx context.Context, binlogs []*datapb.FieldBinlog, s
 	if err := rwOptions.validate(); err != nil {
 		return nil, err
 	}
 
+	binlogReaderOpts := []BinlogReaderOption{}
+	var pluginContext *indexcgopb.StoragePluginContext
+	if hookutil.IsClusterEncyptionEnabled() {
+		if ez := hookutil.GetEzByCollProperties(schema.GetProperties(), rwOptions.collectionID); ez != nil {
+			binlogReaderOpts = append(binlogReaderOpts, WithReaderDecryptionContext(ez.EzID, ez.CollectionID))
+
+			unsafe := hookutil.GetCipher().GetUnsafeKey(ez.EzID, ez.CollectionID)
+			if len(unsafe) > 0 {
+				pluginContext = &indexcgopb.StoragePluginContext{
+					EncryptionZoneId: ez.EzID,
+					CollectionId:     ez.CollectionID,
+					EncryptionKey:    string(unsafe),
+				}
+			}
+		}
+	}
 	switch rwOptions.version {
 	case StorageV1:
 		var blobsReader ChunkedBlobsReader
@@ -235,8 +266,7 @@ func NewBinlogRecordReader(ctx context.Context, binlogs []*datapb.FieldBinlog, s
 		if err != nil {
 			return nil, err
 		}
-		rr, err = newCompositeBinlogRecordReader(schema, blobsReader)
-
+		rr, err = newCompositeBinlogRecordReader(schema, blobsReader, binlogReaderOpts...)
 	case StorageV2:
 		if len(binlogs) <= 0 {
 			return nil, sio.EOF
@@ -258,7 +288,7 @@ func NewBinlogRecordReader(ctx context.Context, binlogs []*datapb.FieldBinlog, s
 				paths[j] = append(paths[j], logPath)
 			}
 		}
-		rr, err = newPackedRecordReader(paths, schema, rwOptions.bufferSize, rwOptions.storageConfig)
+		rr, err = newPackedRecordReader(paths, schema, rwOptions.bufferSize, rwOptions.storageConfig, pluginContext)
 	default:
 		return nil, merr.WrapErrServiceInternal(fmt.Sprintf("unsupported storage version %d", rwOptions.version))
 	}
@@ -276,9 +306,11 @@ func NewBinlogRecordWriter(ctx context.Context, collectionID, partitionID, segme
 	option ...RwOption,
 ) (BinlogRecordWriter, error) {
 	rwOptions := DefaultWriterOptions()
+	option = append(option, WithCollectionID(collectionID))
 	for _, opt := range option {
 		opt(rwOptions)
 	}
 
 	if err := rwOptions.validate(); err != nil {
 		return nil, err
 	}
@@ -290,17 +322,41 @@ func NewBinlogRecordWriter(ctx context.Context, collectionID, partitionID, segme
 		}
 		return rwOptions.uploader(ctx, kvs)
 	}
 
+	opts := []StreamWriterOption{}
+	var pluginContext *indexcgopb.StoragePluginContext
+	if hookutil.IsClusterEncyptionEnabled() {
+		ez := hookutil.GetEzByCollProperties(schema.GetProperties(), collectionID)
+		if ez != nil {
+			encryptor, edek, err := hookutil.GetCipher().GetEncryptor(ez.EzID, ez.CollectionID)
+			if err != nil {
+				return nil, err
+			}
+			opts = append(opts, GetEncryptionOptions(ez.EzID, edek, encryptor)...)
+
+			unsafe := hookutil.GetCipher().GetUnsafeKey(ez.EzID, ez.CollectionID)
+			if len(unsafe) > 0 {
+				pluginContext = &indexcgopb.StoragePluginContext{
+					EncryptionZoneId: ez.EzID,
+					CollectionId:     ez.CollectionID,
+					EncryptionKey:    string(unsafe),
+				}
+			}
+		}
+	}
+
 	switch rwOptions.version {
 	case StorageV1:
 		rootPath := rwOptions.storageConfig.GetRootPath()
 		return newCompositeBinlogRecordWriter(collectionID, partitionID, segmentID, schema,
-			blobsWriter, allocator, chunkSize, rootPath, maxRowNum,
+			blobsWriter, allocator, chunkSize, rootPath, maxRowNum, opts...,
 		)
 	case StorageV2:
 		return newPackedBinlogRecordWriter(collectionID, partitionID, segmentID, schema,
 			blobsWriter, allocator, maxRowNum,
 			rwOptions.bufferSize, rwOptions.multiPartUploadSize, rwOptions.columnGroups,
 			rwOptions.storageConfig,
+			pluginContext,
 		)
 	}
 	return nil, merr.WrapErrServiceInternal(fmt.Sprintf("unsupported storage version %d", rwOptions.version))
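The reader and writer paths above build the same StoragePluginContext literal twice. A small helper with that exact shape, shown only as a sketch (it is not part of the commit), could factor it out:

// pluginContextFromEZ is a hypothetical helper; the EZ fields, GetUnsafeKey call,
// and proto field names all come from the code above.
func pluginContextFromEZ(ez *hookutil.EZ) *indexcgopb.StoragePluginContext {
	unsafeKey := hookutil.GetCipher().GetUnsafeKey(ez.EzID, ez.CollectionID)
	if len(unsafeKey) == 0 {
		return nil
	}
	return &indexcgopb.StoragePluginContext{
		EncryptionZoneId: ez.EzID,
		CollectionId:     ez.CollectionID,
		EncryptionKey:    string(unsafeKey),
	}
}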
@@ -30,11 +30,14 @@ import (
 	"github.com/apache/arrow/go/v17/arrow/memory"
 	"github.com/cockroachdb/errors"
 	"github.com/samber/lo"
+	"go.uber.org/zap"
 
+	"github.com/milvus-io/milvus-proto/go-api/v2/hook"
 	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 	"github.com/milvus-io/milvus/internal/allocator"
 	"github.com/milvus-io/milvus/internal/json"
 	"github.com/milvus-io/milvus/pkg/v2/common"
+	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/etcdpb"
 	"github.com/milvus-io/milvus/pkg/v2/util/merr"
@@ -54,6 +57,7 @@ type CompositeBinlogRecordReader struct {
 	index map[FieldID]int16
 
 	brs    []*BinlogReader
+	bropts []BinlogReaderOption
 	rrs    []array.RecordReader
 }
 
@@ -87,7 +91,7 @@ func (crr *CompositeBinlogRecordReader) iterateNextBatch() error {
 	crr.brs = make([]*BinlogReader, fieldNum)
 
 	for _, b := range blobs {
-		reader, err := NewBinlogReader(b.Value)
+		reader, err := NewBinlogReader(b.Value, crr.bropts...)
 		if err != nil {
 			return err
 		}
@@ -253,7 +257,7 @@ func MakeBlobsReader(blobs []*Blob) ChunkedBlobsReader {
 	}
 }
 
-func newCompositeBinlogRecordReader(schema *schemapb.CollectionSchema, blobsReader ChunkedBlobsReader) (*CompositeBinlogRecordReader, error) {
+func newCompositeBinlogRecordReader(schema *schemapb.CollectionSchema, blobsReader ChunkedBlobsReader, opts ...BinlogReaderOption) (*CompositeBinlogRecordReader, error) {
 	idx := 0
 	index := make(map[FieldID]int16)
 	for _, f := range schema.Fields {
@@ -266,10 +270,12 @@ func newCompositeBinlogRecordReader(schema *schemapb.CollectionSchema, blobsRead
 			idx++
 		}
 	}
 
 	return &CompositeBinlogRecordReader{
 		schema:      schema,
 		BlobsReader: blobsReader,
 		index:       index,
+		bropts:      opts,
 	}, nil
 }
 
@@ -382,6 +388,36 @@ func newDeltalogOneFieldReader(blobs []*Blob) (*DeserializeReaderImpl[*DeleteLog
 	}), nil
 }
 
+type HeaderExtraWriterOption func(header *descriptorEvent)
+
+func WithEncryptionKey(ezID int64, edek []byte) HeaderExtraWriterOption {
+	return func(header *descriptorEvent) {
+		header.AddExtra(edekKey, string(edek))
+		header.AddExtra(ezIDKey, ezID)
+	}
+}
+
+type StreamWriterOption func(*BinlogStreamWriter)
+
+func WithEncryptor(encryptor hook.Encryptor) StreamWriterOption {
+	return func(w *BinlogStreamWriter) {
+		w.encryptor = encryptor
+	}
+}
+
+func WithHeaderExtraOptions(headerOpt HeaderExtraWriterOption) StreamWriterOption {
+	return func(w *BinlogStreamWriter) {
+		w.headerOpt = headerOpt
+	}
+}
+
+func GetEncryptionOptions(ezID int64, edek []byte, encryptor hook.Encryptor) []StreamWriterOption {
+	return []StreamWriterOption{
+		WithEncryptor(encryptor),
+		WithHeaderExtraOptions(WithEncryptionKey(ezID, edek)),
+	}
+}
+
 type BinlogStreamWriter struct {
 	collectionID UniqueID
 	partitionID  UniqueID
@@ -390,6 +426,8 @@ type BinlogStreamWriter struct {
 
 	buf bytes.Buffer
 	rw  *singleFieldRecordWriter
+	headerOpt HeaderExtraWriterOption
+	encryptor hook.Encryptor
 }
 
 func (bsw *BinlogStreamWriter) GetRecordWriter() (RecordWriter, error) {
@@ -415,9 +453,55 @@ func (bsw *BinlogStreamWriter) Finalize() (*Blob, error) {
 	if err := bsw.writeBinlogHeaders(&b); err != nil {
 		return nil, err
 	}
-	if _, err := b.Write(bsw.buf.Bytes()); err != nil {
+
+	// Everything but descryptor event is encrypted
+	tmpBuf := &b
+	if bsw.encryptor != nil {
+		tmpBuf = &bytes.Buffer{}
+	}
+
+	eh := newEventHeader(InsertEventType)
+
+	ev := newInsertEventData()
+	ev.StartTimestamp = 1
+	ev.EndTimestamp = 1
+	eh.EventLength = int32(bsw.buf.Len()) + eh.GetMemoryUsageInBytes() + int32(binary.Size(ev))
+	// eh.NextPosition = eh.EventLength + w.Offset()
+
+	// Write event header
+	if err := eh.Write(tmpBuf); err != nil {
 		return nil, err
 	}
+
+	// Write event data, which ic startTs and endTs for insert event data
+	if err := ev.WriteEventData(tmpBuf); err != nil {
+		return nil, err
+	}
+
+	if err := binary.Write(tmpBuf, common.Endian, bsw.buf.Bytes()); err != nil {
+		return nil, err
+	}
+
+	// tmpBuf could be "b" or new "tmpBuf"
+	// if encryptor is not nil, tmpBuf is new tmpBuf, which need to be written into
+	// b after encryption
+	if bsw.encryptor != nil {
+		cipherText, err := bsw.encryptor.Encrypt(tmpBuf.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		log.Debug("Binlog stream writer encrypted cipher text",
+			zap.Int64("collectionID", bsw.collectionID),
+			zap.Int64("segmentID", bsw.segmentID),
+			zap.Int64("fieldID", bsw.fieldSchema.FieldID),
+			zap.Int("plain size", tmpBuf.Len()),
+			zap.Int("cipher size", len(cipherText)),
+		)
+		if err := binary.Write(&b, common.Endian, cipherText); err != nil {
+			return nil, err
+		}
+	}
+
 	return &Blob{
 		Key:   strconv.Itoa(int(bsw.fieldSchema.FieldID)),
 		Value: b.Bytes(),
@@ -437,23 +521,13 @@ func (bsw *BinlogStreamWriter) writeBinlogHeaders(w io.Writer) error {
 	de.FieldID = bsw.fieldSchema.FieldID
 	de.descriptorEventData.AddExtra(originalSizeKey, strconv.Itoa(int(bsw.rw.writtenUncompressed)))
 	de.descriptorEventData.AddExtra(nullableKey, bsw.fieldSchema.Nullable)
+	// Additional head options
+	if bsw.headerOpt != nil {
+		bsw.headerOpt(de)
+	}
 	if err := de.Write(w); err != nil {
 		return err
 	}
-	// Write event header
-	eh := newEventHeader(InsertEventType)
-	// Write event data
-	ev := newInsertEventData()
-	ev.StartTimestamp = 1
-	ev.EndTimestamp = 1
-	eh.EventLength = int32(bsw.buf.Len()) + eh.GetMemoryUsageInBytes() + int32(binary.Size(ev))
-	// eh.NextPosition = eh.EventLength + w.Offset()
-	if err := eh.Write(w); err != nil {
-		return err
-	}
-	if err := ev.WriteEventData(w); err != nil {
-		return err
-	}
 	return nil
 }
 
@@ -470,16 +544,25 @@ func newBinlogWriter(collectionID, partitionID, segmentID UniqueID,
 
 func NewBinlogStreamWriters(collectionID, partitionID, segmentID UniqueID,
 	schema *schemapb.CollectionSchema,
+	writerOptions ...StreamWriterOption,
 ) map[FieldID]*BinlogStreamWriter {
 	bws := make(map[FieldID]*BinlogStreamWriter)
 
 	for _, f := range schema.Fields {
-		bws[f.FieldID] = newBinlogWriter(collectionID, partitionID, segmentID, f)
+		writer := newBinlogWriter(collectionID, partitionID, segmentID, f)
+		for _, writerOption := range writerOptions {
+			writerOption(writer)
+		}
+		bws[f.FieldID] = writer
 	}
 
 	for _, structField := range schema.StructArrayFields {
 		for _, subField := range structField.Fields {
-			bws[subField.FieldID] = newBinlogWriter(collectionID, partitionID, segmentID, subField)
+			writer := newBinlogWriter(collectionID, partitionID, segmentID, subField)
+			for _, writerOption := range writerOptions {
+				writerOption(writer)
+			}
+			bws[subField.FieldID] = writer
 		}
 	}
 
@@ -570,6 +653,7 @@ type CompositeBinlogRecordWriter struct {
 	bm25StatsLog map[FieldID]*datapb.FieldBinlog
 
 	flushedUncompressed uint64
+	options             []StreamWriterOption
 }
 
 var _ BinlogRecordWriter = (*CompositeBinlogRecordWriter)(nil)
@@ -631,7 +715,7 @@ func (c *CompositeBinlogRecordWriter) Write(r Record) error {
 
 func (c *CompositeBinlogRecordWriter) initWriters() error {
 	if c.rw == nil {
-		c.fieldWriters = NewBinlogStreamWriters(c.collectionID, c.partitionID, c.segmentID, c.schema)
+		c.fieldWriters = NewBinlogStreamWriters(c.collectionID, c.partitionID, c.segmentID, c.schema, c.options...)
 		rws := make(map[FieldID]RecordWriter, len(c.fieldWriters))
 		for fid, w := range c.fieldWriters {
 			rw, err := w.GetRecordWriter()
@@ -837,6 +921,7 @@ func (c *CompositeBinlogRecordWriter) GetRowNum() int64 {
 
 func newCompositeBinlogRecordWriter(collectionID, partitionID, segmentID UniqueID, schema *schemapb.CollectionSchema,
 	blobsWriter ChunkedBlobsWriter, allocator allocator.Interface, chunkSize uint64, rootPath string, maxRowNum int64,
+	options ...StreamWriterOption,
) (*CompositeBinlogRecordWriter, error) {
 	pkField, err := typeutil.GetPrimaryFieldSchema(schema)
 	if err != nil {
@@ -869,6 +954,7 @@ func newCompositeBinlogRecordWriter(collectionID, partitionID, segmentID UniqueI
 		maxRowNum: maxRowNum,
 		pkstats:   stats,
 		bm25Stats: bm25Stats,
+		options:   options,
 	}, nil
 }
 
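Putting the new stream-writer options together, a caller that already holds an encryptor and wrapped key wires them into every per-field writer roughly as below; this is a condensed sketch of what NewBinlogRecordWriter does, and the function name and argument names are placeholders:

// newEncryptedStreamWriters is a sketch only; it assumes the storage package
// types (UniqueID, FieldID, BinlogStreamWriter) visible in the diff above.
func newEncryptedStreamWriters(ezID, collectionID, partitionID, segmentID UniqueID, schema *schemapb.CollectionSchema) (map[FieldID]*BinlogStreamWriter, error) {
	encryptor, edek, err := hookutil.GetCipher().GetEncryptor(ezID, collectionID)
	if err != nil {
		return nil, err
	}
	opts := GetEncryptionOptions(ezID, edek, encryptor)
	// Each returned BinlogStreamWriter will now encrypt its event payload in Finalize.
	return NewBinlogStreamWriters(collectionID, partitionID, segmentID, schema, opts...), nil
}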
@@ -34,6 +34,7 @@ import (
 	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/etcdpb"
+	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
 	"github.com/milvus-io/milvus/pkg/v2/util/merr"
 	"github.com/milvus-io/milvus/pkg/v2/util/metautil"
@@ -50,6 +51,7 @@ type packedRecordReader struct {
 	arrowSchema   *arrow.Schema
 	field2Col     map[FieldID]int
 	storageConfig *indexpb.StorageConfig
+	storagePluginContext *indexcgopb.StoragePluginContext
 }
 
 var _ RecordReader = (*packedRecordReader)(nil)
@@ -65,10 +67,10 @@ func (pr *packedRecordReader) iterateNextBatch() error {
 		return io.EOF
 	}
 
-	reader, err := packed.NewPackedReader(pr.paths[pr.chunk], pr.arrowSchema, pr.bufferSize, pr.storageConfig)
+	reader, err := packed.NewPackedReader(pr.paths[pr.chunk], pr.arrowSchema, pr.bufferSize, pr.storageConfig, pr.storagePluginContext)
 	pr.chunk++
 	if err != nil {
-		return merr.WrapErrParameterInvalid("New binlog record packed reader error: %s", err.Error())
+		return errors.Newf("New binlog record packed reader error: %w", err)
 	}
 	pr.reader = reader
 	return nil
@@ -107,7 +109,7 @@ func (pr *packedRecordReader) Close() error {
 	return nil
 }
 
-func newPackedRecordReader(paths [][]string, schema *schemapb.CollectionSchema, bufferSize int64, storageConfig *indexpb.StorageConfig,
+func newPackedRecordReader(paths [][]string, schema *schemapb.CollectionSchema, bufferSize int64, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext,
 ) (*packedRecordReader, error) {
 	arrowSchema, err := ConvertToArrowSchema(schema)
 	if err != nil {
@@ -124,13 +126,15 @@ func newPackedRecordReader(paths [][]string, schema *schemapb.CollectionSchema,
 		arrowSchema:   arrowSchema,
 		field2Col:     field2Col,
 		storageConfig: storageConfig,
+		storagePluginContext: storagePluginContext,
 	}, nil
 }
 
+// Deprecated
 func NewPackedDeserializeReader(paths [][]string, schema *schemapb.CollectionSchema,
 	bufferSize int64, shouldCopy bool,
 ) (*DeserializeReaderImpl[*Value], error) {
-	reader, err := newPackedRecordReader(paths, schema, bufferSize, nil)
+	reader, err := newPackedRecordReader(paths, schema, bufferSize, nil, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -213,7 +217,7 @@ func (pw *packedRecordWriter) Close() error {
 	return nil
 }
 
-func NewPackedRecordWriter(bucketName string, paths []string, schema *schemapb.CollectionSchema, bufferSize int64, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, storageConfig *indexpb.StorageConfig) (*packedRecordWriter, error) {
+func NewPackedRecordWriter(bucketName string, paths []string, schema *schemapb.CollectionSchema, bufferSize int64, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*packedRecordWriter, error) {
 	arrowSchema, err := ConvertToArrowSchema(schema)
 	if err != nil {
 		return nil, merr.WrapErrServiceInternal(
@@ -232,7 +236,7 @@ func NewPackedRecordWriter(bucketName string, paths []string, schema *schemapb.C
 		}
 		return path.Join(bucketName, p)
 	})
-	writer, err := packed.NewPackedWriter(truePaths, arrowSchema, bufferSize, multiPartUploadSize, columnGroups, storageConfig)
+	writer, err := packed.NewPackedWriter(truePaths, arrowSchema, bufferSize, multiPartUploadSize, columnGroups, storageConfig, storagePluginContext)
 	if err != nil {
 		return nil, merr.WrapErrServiceInternal(
 			fmt.Sprintf("can not new packed record writer %s", err.Error()))
@@ -260,10 +264,11 @@ func NewPackedRecordWriter(bucketName string, paths []string, schema *schemapb.C
 	}, nil
 }
 
+// Deprecated, todo remove
 func NewPackedSerializeWriter(bucketName string, paths []string, schema *schemapb.CollectionSchema, bufferSize int64,
 	multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, batchSize int,
 ) (*SerializeWriterImpl[*Value], error) {
-	packedRecordWriter, err := NewPackedRecordWriter(bucketName, paths, schema, bufferSize, multiPartUploadSize, columnGroups, nil)
+	packedRecordWriter, err := NewPackedRecordWriter(bucketName, paths, schema, bufferSize, multiPartUploadSize, columnGroups, nil, nil)
 	if err != nil {
 		return nil, merr.WrapErrServiceInternal(
 			fmt.Sprintf("can not new packed record writer %s", err.Error()))
@@ -289,6 +294,7 @@ type PackedBinlogRecordWriter struct {
 	multiPartUploadSize int64
 	columnGroups        []storagecommon.ColumnGroup
 	storageConfig       *indexpb.StorageConfig
+	storagePluginContext *indexcgopb.StoragePluginContext
 
 	// writer and stats generated at runtime
 	writer *packedRecordWriter
@@ -370,7 +376,7 @@ func (pw *PackedBinlogRecordWriter) initWriters(r Record) error {
 		paths = append(paths, path)
 		logIdStart++
 	}
-	pw.writer, err = NewPackedRecordWriter(pw.storageConfig.GetBucketName(), paths, pw.schema, pw.bufferSize, pw.multiPartUploadSize, pw.columnGroups, pw.storageConfig)
+	pw.writer, err = NewPackedRecordWriter(pw.storageConfig.GetBucketName(), paths, pw.schema, pw.bufferSize, pw.multiPartUploadSize, pw.columnGroups, pw.storageConfig, pw.storagePluginContext)
 	if err != nil {
 		return merr.WrapErrServiceInternal(fmt.Sprintf("can not new packed record writer %s", err.Error()))
 	}
@@ -540,6 +546,7 @@ func (pw *PackedBinlogRecordWriter) GetBufferUncompressed() uint64 {
 func newPackedBinlogRecordWriter(collectionID, partitionID, segmentID UniqueID, schema *schemapb.CollectionSchema,
 	blobsWriter ChunkedBlobsWriter, allocator allocator.Interface, maxRowNum int64, bufferSize, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup,
 	storageConfig *indexpb.StorageConfig,
+	storagePluginContext *indexcgopb.StoragePluginContext,
) (*PackedBinlogRecordWriter, error) {
 	arrowSchema, err := ConvertToArrowSchema(schema)
 	if err != nil {
@@ -580,6 +587,7 @@ func newPackedBinlogRecordWriter(collectionID, partitionID, segmentID UniqueID,
 		pkstats:       stats,
 		bm25Stats:     bm25Stats,
 		storageConfig: storageConfig,
+		storagePluginContext: storagePluginContext,
 
 		tsFrom: typeutil.MaxTimestamp,
 		tsTo:   0,
@@ -32,10 +32,11 @@ import (
 	"github.com/apache/arrow/go/v17/arrow"
 	"github.com/apache/arrow/go/v17/arrow/cdata"
 
+	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
 )
 
-func NewPackedReader(filePaths []string, schema *arrow.Schema, bufferSize int64, storageConfig *indexpb.StorageConfig) (*PackedReader, error) {
+func NewPackedReader(filePaths []string, schema *arrow.Schema, bufferSize int64, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*PackedReader, error) {
 	cFilePaths := make([]*C.char, len(filePaths))
 	for i, path := range filePaths {
 		cFilePaths[i] = C.CString(path)
@@ -54,6 +55,17 @@ func NewPackedReader(filePaths []string, schema *arrow.Schema, bufferSize int64,
 	var cPackedReader C.CPackedReader
 	var status C.CStatus
 
+	var pluginContextPtr *C.CPluginContext
+	if storagePluginContext != nil {
+		ckey := C.CString(storagePluginContext.EncryptionKey)
+		defer C.free(unsafe.Pointer(ckey))
+		var pluginContext C.CPluginContext
+		pluginContext.ez_id = C.int64_t(storagePluginContext.EncryptionZoneId)
+		pluginContext.collection_id = C.int64_t(storagePluginContext.CollectionId)
+		pluginContext.key = ckey
+		pluginContextPtr = &pluginContext
+	}
+
 	if storageConfig != nil {
 		cStorageConfig := C.CStorageConfig{
 			address: C.CString(storageConfig.GetAddress()),
@@ -87,9 +99,9 @@ func NewPackedReader(filePaths []string, schema *arrow.Schema, bufferSize int64,
 		defer C.free(unsafe.Pointer(cStorageConfig.region))
 		defer C.free(unsafe.Pointer(cStorageConfig.gcp_credential_json))
 
-		status = C.NewPackedReaderWithStorageConfig(cFilePathsArray, cNumPaths, cSchema, cBufferSize, cStorageConfig, &cPackedReader)
+		status = C.NewPackedReaderWithStorageConfig(cFilePathsArray, cNumPaths, cSchema, cBufferSize, cStorageConfig, &cPackedReader, pluginContextPtr)
 	} else {
-		status = C.NewPackedReader(cFilePathsArray, cNumPaths, cSchema, cBufferSize, &cPackedReader)
+		status = C.NewPackedReader(cFilePathsArray, cNumPaths, cSchema, cBufferSize, &cPackedReader, pluginContextPtr)
 	}
 	if err := ConsumeCStatusIntoError(&status); err != nil {
 		return nil, err
@@ -82,7 +82,7 @@ func (suite *PackedTestSuite) TestPackedOneFile() {
 	columnGroups := []storagecommon.ColumnGroup{{Columns: []int{0, 1, 2}, GroupID: storagecommon.DefaultShortColumnGroupID}}
 	bufferSize := int64(10 * 1024 * 1024) // 10MB
 	multiPartUploadSize := int64(0)
-	pw, err := NewPackedWriter(paths, suite.schema, bufferSize, multiPartUploadSize, columnGroups, nil)
+	pw, err := NewPackedWriter(paths, suite.schema, bufferSize, multiPartUploadSize, columnGroups, nil, nil)
 	suite.NoError(err)
 	for i := 0; i < batches; i++ {
 		err = pw.WriteRecordBatch(suite.rec)
@@ -91,7 +91,7 @@ func (suite *PackedTestSuite) TestPackedOneFile() {
 	err = pw.Close()
 	suite.NoError(err)
 
-	reader, err := NewPackedReader(paths, suite.schema, bufferSize, nil)
+	reader, err := NewPackedReader(paths, suite.schema, bufferSize, nil, nil)
 	suite.NoError(err)
 	rr, err := reader.ReadNext()
 	suite.NoError(err)
@@ -134,7 +134,7 @@ func (suite *PackedTestSuite) TestPackedMultiFiles() {
 	columnGroups := []storagecommon.ColumnGroup{{Columns: []int{2}, GroupID: 2}, {Columns: []int{0, 1}, GroupID: storagecommon.DefaultShortColumnGroupID}}
 	bufferSize := int64(10 * 1024 * 1024) // 10MB
 	multiPartUploadSize := int64(0)
-	pw, err := NewPackedWriter(paths, suite.schema, bufferSize, multiPartUploadSize, columnGroups, nil)
+	pw, err := NewPackedWriter(paths, suite.schema, bufferSize, multiPartUploadSize, columnGroups, nil, nil)
 	suite.NoError(err)
 	for i := 0; i < batches; i++ {
 		err = pw.WriteRecordBatch(rec)
@@ -143,7 +143,7 @@ func (suite *PackedTestSuite) TestPackedMultiFiles() {
 	err = pw.Close()
 	suite.NoError(err)
 
-	reader, err := NewPackedReader(paths, suite.schema, bufferSize, nil)
+	reader, err := NewPackedReader(paths, suite.schema, bufferSize, nil, nil)
 	suite.NoError(err)
 	var rows int64 = 0
 	var rr arrow.Record
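A brief usage sketch for the updated cgo wrappers, mirroring the new trailing parameter: callers pass a StoragePluginContext when encryption is on and nil otherwise, exactly as the updated tests do. Function name, paths, schema, and sizes below are placeholders:

// writeEncryptedPacked is a sketch of threading the plugin context through
// NewPackedWriter; the EncryptionKey value is a placeholder, not a real key.
func writeEncryptedPacked(paths []string, schema *arrow.Schema, groups []storagecommon.ColumnGroup, cfg *indexpb.StorageConfig, rec arrow.Record) error {
	pluginCtx := &indexcgopb.StoragePluginContext{
		EncryptionZoneId: 1,
		CollectionId:     100,
		EncryptionKey:    "unsafe-ezk-from-cipher-plugin",
	}
	pw, err := NewPackedWriter(paths, schema, int64(10*1024*1024), int64(0), groups, cfg, pluginCtx)
	if err != nil {
		return err
	}
	if err := pw.WriteRecordBatch(rec); err != nil {
		return err
	}
	return pw.Close()
}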
@@ -33,10 +33,11 @@ import (
 	"github.com/cockroachdb/errors"
 
 	"github.com/milvus-io/milvus/internal/storagecommon"
+	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
 	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
 )
 
-func NewPackedWriter(filePaths []string, schema *arrow.Schema, bufferSize int64, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, storageConfig *indexpb.StorageConfig) (*PackedWriter, error) {
+func NewPackedWriter(filePaths []string, schema *arrow.Schema, bufferSize int64, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*PackedWriter, error) {
 	cFilePaths := make([]*C.char, len(filePaths))
 	for i, path := range filePaths {
 		cFilePaths[i] = C.CString(path)
@@ -71,6 +72,18 @@ func NewPackedWriter(filePaths []string, schema *arrow.Schema, bufferSize int64,
 	var cPackedWriter C.CPackedWriter
 	var status C.CStatus
 
+	var pluginContextPtr *C.CPluginContext
+	if storagePluginContext != nil {
+		ckey := C.CString(storagePluginContext.EncryptionKey)
+		defer C.free(unsafe.Pointer(ckey))
+
+		var pluginContext C.CPluginContext
+		pluginContext.ez_id = C.int64_t(storagePluginContext.EncryptionZoneId)
+		pluginContext.collection_id = C.int64_t(storagePluginContext.CollectionId)
+		pluginContext.key = ckey
+		pluginContextPtr = &pluginContext
+	}
+
 	if storageConfig != nil {
 		cStorageConfig := C.CStorageConfig{
 			address: C.CString(storageConfig.GetAddress()),
@@ -103,9 +116,9 @@ func NewPackedWriter(filePaths []string, schema *arrow.Schema, bufferSize int64,
 		defer C.free(unsafe.Pointer(cStorageConfig.sslCACert))
 		defer C.free(unsafe.Pointer(cStorageConfig.region))
 		defer C.free(unsafe.Pointer(cStorageConfig.gcp_credential_json))
-		status = C.NewPackedWriterWithStorageConfig(cSchema, cBufferSize, cFilePathsArray, cNumPaths, cMultiPartUploadSize, cColumnGroups, cStorageConfig, &cPackedWriter)
+		status = C.NewPackedWriterWithStorageConfig(cSchema, cBufferSize, cFilePathsArray, cNumPaths, cMultiPartUploadSize, cColumnGroups, cStorageConfig, &cPackedWriter, pluginContextPtr)
 	} else {
-		status = C.NewPackedWriter(cSchema, cBufferSize, cFilePathsArray, cNumPaths, cMultiPartUploadSize, cColumnGroups, &cPackedWriter)
+		status = C.NewPackedWriter(cSchema, cBufferSize, cFilePathsArray, cNumPaths, cMultiPartUploadSize, cColumnGroups, &cPackedWriter, pluginContextPtr)
 	}
 	if err := ConsumeCStatusIntoError(&status); err != nil {
 		return nil, err
@@ -9,10 +9,12 @@ import (
 	"github.com/milvus-io/milvus/internal/streamingnode/server/resource"
 	"github.com/milvus-io/milvus/internal/streamingnode/server/service"
 	"github.com/milvus-io/milvus/internal/streamingnode/server/walmanager"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
 	"github.com/milvus-io/milvus/internal/util/initcore"
 	"github.com/milvus-io/milvus/internal/util/sessionutil"
 	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/proto/streamingpb"
+	"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
 	_ "github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/kafka"
 	_ "github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/pulsar"
 	_ "github.com/milvus-io/milvus/pkg/v2/streaming/walimpls/impls/rmq"
@@ -50,6 +52,9 @@ func (s *Server) init() {
 
 	// init paramtable change callback for core related config
 	initcore.SetupCoreConfigChangelCallback()
+	if hookutil.IsClusterEncyptionEnabled() {
+		message.RegisterCipher(hookutil.GetCipher())
+	}
 }
 
 // Stop stops the streamingnode server.
@ -24,12 +24,14 @@ import (
 	"sync"

 	"github.com/cockroachdb/errors"
+	"github.com/samber/lo"
 	"go.uber.org/atomic"
 	"go.uber.org/zap"

 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
 	"github.com/milvus-io/milvus-proto/go-api/v2/hook"
 	"github.com/milvus-io/milvus/pkg/v2/log"
+	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
 	"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
 	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
 )
@ -53,14 +55,17 @@ func IsClusterEncyptionEnabled() bool {
 }

 const (
+	// Used in db and collection properties
 	EncryptionEnabledKey = "cipher.enabled"
 	EncryptionRootKeyKey = "cipher.key"
 	EncryptionEzIDKey    = "cipher.ezID"

+	// Used in Plugins
 	CipherConfigCreateEZ       = "cipher.ez.create"
 	CipherConfigRemoveEZ       = "cipher.ez.remove"
 	CipherConfigMilvusRoleName = "cipher.milvusRoleName"
 	CipherConfigKeyKmsKeyArn   = "cipher.kmsKeyArn"
+	CipherConfigUnsafeEZK      = "cipher.ezk"
 )

 type EZ struct {
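Aside from the diff itself: these keys are consumed by the cipher plugin as a plain map[string]string handed to Cipher.Init. A hedged sketch of the two shapes that the helpers introduced further down in this diff (CreateEZByDBProperties, RemoveEZByDBProperties) end up building; the concrete values here are illustrative, not taken from the commit:

package example

// Sketch only; the real maps are assembled from db properties by the new hookutil helpers.
func exampleCipherConfigs() (create, remove map[string]string) {
	create = map[string]string{
		"cipher.ez.create": "1",               // CipherConfigCreateEZ: encryption-zone id
		"cipher.kmsKeyArn": "arn:aws:kms:...", // CipherConfigKeyKmsKeyArn: root key for the zone (placeholder ARN)
	}
	remove = map[string]string{
		"cipher.ez.remove": "1", // CipherConfigRemoveEZ
	}
	return create, remove
}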
@ -80,12 +85,26 @@ type CipherContext struct {
 	key []byte
 }

+func ContainsCipherProperties(properties []*commonpb.KeyValuePair, deletedKeys []string) bool {
+	for _, property := range properties {
+		if property.Key == EncryptionEnabledKey ||
+			property.Key == EncryptionEzIDKey ||
+			property.Key == EncryptionRootKeyKey {
+			return true
+		}
+	}
+	return lo.ContainsBy(deletedKeys, func(data string) bool {
+		return lo.Contains([]string{EncryptionEnabledKey, EncryptionEzIDKey, EncryptionRootKeyKey}, data)
+	})
+}
+
 func GetEzByCollProperties(collProperties []*commonpb.KeyValuePair, collectionID int64) *EZ {
 	if len(collProperties) == 0 {
 		log.Warn("GetEzByCollProperties empty properties",
 			zap.Any("insertCodec collID", collectionID),
 			zap.Any("properties", collProperties),
 		)
+		return nil
 	}
 	for _, property := range collProperties {
 		if property.Key == EncryptionEzIDKey {
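Aside from the diff: ContainsCipherProperties answers "does this alter-properties request touch encryption settings?", either through newly set keys or through deleted keys. A minimal usage sketch, assuming the constants above; it only builds inside the milvus module since hookutil is an internal package:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/internal/util/hookutil"
)

func main() {
	// Setting cipher.enabled counts as a cipher-related change.
	props := []*commonpb.KeyValuePair{{Key: hookutil.EncryptionEnabledKey, Value: "true"}}
	fmt.Println(hookutil.ContainsCipherProperties(props, nil)) // true

	// So does deleting cipher.ezID.
	fmt.Println(hookutil.ContainsCipherProperties(nil, []string{hookutil.EncryptionEzIDKey})) // true
}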
@ -99,26 +118,167 @@ func GetEzByCollProperties(collProperties []*commonpb.KeyValuePair, collectionID
 	return nil
 }

-func TidyDBCipherProperties(dbProperties []*commonpb.KeyValuePair) ([]*commonpb.KeyValuePair, error) {
-	if IsDBEncyptionEnabled(dbProperties) {
-		if !IsClusterEncyptionEnabled() {
+// GetStoragePluginContext returns the local plugin context for RPC from datacoord to datanode
+func GetStoragePluginContext(properties []*commonpb.KeyValuePair, collectionID int64) []*commonpb.KeyValuePair {
+	if GetCipher() == nil {
+		return nil
+	}
+
+	if ez := GetEzByCollProperties(properties, collectionID); ez != nil {
+		key := GetCipher().GetUnsafeKey(ez.EzID, ez.CollectionID)
+		pluginContext := []*commonpb.KeyValuePair{
+			{
+				Key:   CipherConfigCreateEZ,
+				Value: strconv.FormatInt(ez.EzID, 10),
+			},
+			{
+				Key:   CipherConfigUnsafeEZK,
+				Value: string(key),
+			},
+		}
+		return pluginContext
+	}
+
+	return nil
+}
+
+func GetDBCipherProperties(ezID uint64, kmsKey string) []*commonpb.KeyValuePair {
+	return []*commonpb.KeyValuePair{
+		{
+			Key:   EncryptionEnabledKey,
+			Value: "true",
+		},
+		{
+			Key:   EncryptionEzIDKey,
+			Value: strconv.FormatUint(ezID, 10),
+		},
+		{
+			Key:   EncryptionRootKeyKey,
+			Value: kmsKey,
+		},
+	}
+}
+
+func RemoveEZByDBProperties(dbProperties []*commonpb.KeyValuePair) error {
+	if GetCipher() == nil {
+		return nil
+	}
+
+	ezIdStr := ""
+	for _, property := range dbProperties {
+		if property.Key == EncryptionEzIDKey {
+			ezIdStr = property.Value
+		}
+	}
+	if len(ezIdStr) == 0 {
+		return nil
+	}
+
+	dropConfig := map[string]string{CipherConfigRemoveEZ: ezIdStr}
+	if err := GetCipher().Init(dropConfig); err != nil {
+		return err
+	}
+	return nil
+}
+
+func CreateLocalEZByPluginContext(context []*commonpb.KeyValuePair) (*indexcgopb.StoragePluginContext, error) {
+	if GetCipher() == nil {
+		return nil, nil
+	}
+	config := make(map[string]string)
+	ctx := &indexcgopb.StoragePluginContext{}
+	for _, value := range context {
+		if value.GetKey() == CipherConfigCreateEZ {
+			ezID, err := strconv.ParseInt(value.GetValue(), 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			config[CipherConfigCreateEZ] = value.GetValue()
+			ctx.EncryptionZoneId = ezID
+		}
+		if value.GetKey() == CipherConfigUnsafeEZK {
+			config[CipherConfigUnsafeEZK] = value.GetValue()
+			ctx.EncryptionKey = value.GetValue()
+		}
+	}
+	if len(config) == 2 {
+		return ctx, GetCipher().Init(config)
+	}
+	return nil, nil
+}
+
+func CreateEZByDBProperties(dbProperties []*commonpb.KeyValuePair) error {
+	if GetCipher() == nil {
+		return nil
+	}
+
+	config := make(map[string]string)
+	for _, property := range dbProperties {
+		if property.GetKey() == EncryptionEzIDKey {
+			config[CipherConfigCreateEZ] = property.Value
+		}
+		if property.GetKey() == EncryptionRootKeyKey {
+			config[CipherConfigKeyKmsKeyArn] = property.GetValue()
+		}
+	}
+
+	if len(config) == 2 {
+		return GetCipher().Init(config)
+	}
+
+	return nil
+}
+
+func TidyDBCipherProperties(ezID int64, dbProperties []*commonpb.KeyValuePair) ([]*commonpb.KeyValuePair, error) {
+	dbEncryptionEnabled := IsDBEncyptionEnabled(dbProperties)
+	if GetCipher() == nil {
+		if dbEncryptionEnabled {
 			return nil, ErrCipherPluginMissing
 		}
+		return dbProperties, nil
+	}
+
+	if dbEncryptionEnabled {
+		ezIDKv := &commonpb.KeyValuePair{
+			Key:   EncryptionEzIDKey,
+			Value: strconv.FormatInt(ezID, 10),
+		}
+		// kmsKey already in the properties
 		for _, property := range dbProperties {
 			if property.Key == EncryptionRootKeyKey {
+				dbProperties = append(dbProperties, ezIDKv)
 				return dbProperties, nil
 			}
 		}
+
+		if defaultRootKey := paramtable.GetCipherParams().DefaultRootKey.GetValue(); defaultRootKey != "" {
 			// set default root key from config if EncryuptionRootKeyKey left empty
-		dbProperties = append(dbProperties, &commonpb.KeyValuePair{
+			dbProperties = append(dbProperties,
+				ezIDKv,
+				&commonpb.KeyValuePair{
 					Key:   EncryptionRootKeyKey,
-			Value: paramtable.GetCipherParams().DefaultRootKey.GetValue(),
-		})
+					Value: defaultRootKey,
+				},
+			)
+			return dbProperties, nil
+		}
+		return nil, fmt.Errorf("Empty default root key for encrypted database without kms key")
 	}
 	return dbProperties, nil
 }
+
+func GetEzPropByDBProperties(dbProperties []*commonpb.KeyValuePair) *commonpb.KeyValuePair {
+	for _, property := range dbProperties {
+		if property.Key == EncryptionEzIDKey {
+			return &commonpb.KeyValuePair{
+				Key:   EncryptionEzIDKey,
+				Value: property.Value,
+			}
+		}
+	}
+	return nil
+}
+
 func IsDBEncyptionEnabled(dbProperties []*commonpb.KeyValuePair) bool {
 	for _, property := range dbProperties {
 		if property.Key == EncryptionEnabledKey {
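Aside from the diff: the round trip between the two new helpers is easier to see side by side. The coordinator derives key/value pairs from collection properties with GetStoragePluginContext, ships them in a task request's plugin_context field, and the worker rebuilds a local encryption zone with CreateLocalEZByPluginContext. A hedged sketch; the wrapper function names below are invented and the real call sites are elsewhere in this commit:

package example

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/internal/util/hookutil"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"
)

// Coordinator side: derive the pairs that get attached to a task request
// (the new repeated common.KeyValuePair plugin_context fields in the protos below).
func buildPluginContext(collProps []*commonpb.KeyValuePair, collectionID int64) []*commonpb.KeyValuePair {
	return hookutil.GetStoragePluginContext(collProps, collectionID)
}

// Worker side: rebuild the local encryption zone from the received pairs and
// hand the resulting StoragePluginContext on to the cgo layer.
func restorePluginContext(pluginContext []*commonpb.KeyValuePair) (*indexcgopb.StoragePluginContext, error) {
	return hookutil.CreateLocalEZByPluginContext(pluginContext)
}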
@ -128,15 +288,6 @@ func IsDBEncyptionEnabled(dbProperties []*commonpb.KeyValuePair) bool {
 	return false
 }

-func GetEZRootKeyByDBProperties(dbProperties []*commonpb.KeyValuePair) string {
-	for _, property := range dbProperties {
-		if property.Key == EncryptionRootKeyKey {
-			return property.Value
-		}
-	}
-	return paramtable.GetCipherParams().DefaultRootKey.GetValue()
-}
-
 // For test only
 func InitTestCipher() {
 	InitOnceCipher()
@ -88,24 +88,35 @@ func (s *CipherSuite) TestTidyDBCipherProperties() {
 		{Key: EncryptionEnabledKey, Value: "true"},
 		{Key: EncryptionRootKeyKey, Value: "existing-root-key"},
 	}
-	result, err := TidyDBCipherProperties(dbPropertiesWithRootKey)
+	result, err := TidyDBCipherProperties(1, dbPropertiesWithRootKey)
 	s.NoError(err)
-	s.Equal(dbPropertiesWithRootKey, result)
+	s.Equal(3, len(result))
+	for _, kv := range result {
+		switch kv.Key {
+		case EncryptionEnabledKey:
+			s.Equal(kv.Value, "true")
+		case EncryptionEzIDKey:
+			s.Equal(kv.Value, "1")
+		case EncryptionRootKeyKey:
+			s.Equal(kv.Value, "existing-root-key")
+		default:
+			s.Fail("unexpected key")
+		}
+	}
+
 	// Test with encryption enabled and test cipher available
+	// Default rootkey is empty
 	InitTestCipher()
 	dbPropertiesWithoutRootKey := []*commonpb.KeyValuePair{
 		{Key: EncryptionEnabledKey, Value: "true"},
 	}
-	result, err = TidyDBCipherProperties(dbPropertiesWithoutRootKey)
-	s.NoError(err)
-	s.Len(result, 2) // should have EncryptionEnabledKey + added default root key
-	s.Equal(EncryptionEnabledKey, result[0].Key)
-	s.Equal(EncryptionRootKeyKey, result[1].Key)
+	result, err = TidyDBCipherProperties(1, dbPropertiesWithoutRootKey)
+	s.Error(err)
+	s.Nil(result)

 	// Test without encryption enabled
 	dbPropertiesWithoutEncryption := []*commonpb.KeyValuePair{}
-	result, err = TidyDBCipherProperties(dbPropertiesWithoutEncryption)
+	result, err = TidyDBCipherProperties(1, dbPropertiesWithoutEncryption)
 	s.NoError(err)
 	s.NotNil(result)
 	s.Equal(dbPropertiesWithoutEncryption, result)
@ -121,25 +132,13 @@ func (s *CipherSuite) TestIsDBEncyptionEnabled() {
 	s.False(IsDBEncyptionEnabled(dbProperties))
 }

-func (s *CipherSuite) TestGetEZRootKeyByDBProperties() {
-	dbProperties := []*commonpb.KeyValuePair{
-		{Key: EncryptionRootKeyKey, Value: "rootKey"},
-	}
-	rootKey := GetEZRootKeyByDBProperties(dbProperties)
-	s.Equal("rootKey", rootKey)
-
-	emptyProperties := []*commonpb.KeyValuePair{}
-	defaultRootKey := GetEZRootKeyByDBProperties(emptyProperties)
-	s.Equal(paramtable.GetCipherParams().DefaultRootKey.GetValue(), defaultRootKey)
-}
-
 func (s *CipherSuite) TestTidyDBCipherPropertiesError() {
 	// Reset cipher to nil to test error case
 	storeCipher(nil)
 	dbProperties := []*commonpb.KeyValuePair{
 		{Key: EncryptionEnabledKey, Value: "true"},
 	}
-	_, err := TidyDBCipherProperties(dbProperties)
+	_, err := TidyDBCipherProperties(1, dbProperties)
 	s.Error(err)
 	s.Equal(ErrCipherPluginMissing, err)
 }
@ -159,3 +158,23 @@ func (s *CipherSuite) TestIsClusterEncyptionEnabled() {
 	InitTestCipher()
 	s.True(IsClusterEncyptionEnabled())
 }
+
+func (s *CipherSuite) TestContainsCipherProperty() {
+	tests := []struct {
+		props    []*commonpb.KeyValuePair
+		keys     []string
+		expected bool
+	}{
+		{[]*commonpb.KeyValuePair{{Key: EncryptionEnabledKey, Value: "true"}}, nil, true},
+		{[]*commonpb.KeyValuePair{{Key: EncryptionEzIDKey, Value: "123"}}, nil, true},
+		{[]*commonpb.KeyValuePair{{Key: EncryptionRootKeyKey, Value: "abc"}}, nil, true},
+		{nil, []string{EncryptionEnabledKey}, true},
+		{nil, []string{EncryptionEzIDKey}, true},
+		{nil, []string{EncryptionRootKeyKey}, true},
+		{[]*commonpb.KeyValuePair{{Key: "key1", Value: "value1"}}, []string{"others"}, false},
+	}
+
+	for _, test := range tests {
+		s.Equal(test.expected, ContainsCipherProperties(test.props, test.keys))
+	}
+}
@ -113,6 +113,9 @@ func (r *reader) init(paths []string, tsStart, tsEnd uint64, storageConfig *inde
 	validIDs := lo.Keys(r.insertLogs)
 	log.Info("create binlog reader for these fields", zap.Any("validIDs", validIDs))

+	// TODO:[GOOSE] Backup related changes: No CollectionID and schema comes from to write collection
+	// means this reader cannot read encrypted files.
+	// StoragePlugin config is wrong for backuped binlogs
 	rr, err := storage.NewBinlogRecordReader(r.ctx, binlogs, r.schema,
 		storage.WithVersion(r.storageVersion),
 		storage.WithBufferSize(32*1024*1024),
@ -59,6 +59,7 @@ func readData(reader *storage.BinlogReader, et storage.EventTypeCode) ([]any, []
 	return rowsSet, validDataRowsSet, nil
 }

+// read delete data only
 func newBinlogReader(ctx context.Context, cm storage.ChunkManager, path string) (*storage.BinlogReader, error) {
 	bytes, err := cm.Read(ctx, path) // TODO: dyh, checks if the error is a retryable error
 	if err != nil {
@ -325,6 +325,7 @@ func (index *CgoIndex) buildStringIndex(dataset *Dataset) error {
 	return HandleCStatus(&status, "failed to build scalar index")
 }

+// test only
 func (index *CgoIndex) Serialize() ([]*Blob, error) {
 	var cBinarySet C.CBinarySet

@ -363,6 +364,7 @@ func (index *CgoIndex) Serialize() ([]*Blob, error) {
 	return ret, nil
 }

+// Not inuse
 func (index *CgoIndex) GetIndexFileInfo() ([]*IndexFileInfo, error) {
 	var cBinarySet C.CBinarySet

@ -39,8 +39,10 @@ import (
 	"unsafe"

 	"github.com/cockroachdb/errors"
+	"go.uber.org/zap"

 	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
 	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
 )
@ -486,3 +488,18 @@ func serializeHeaders(headerstr string) string {
 	}
 	return string(decodeheaders)
 }
+
+func InitPluginLoader() error {
+	if hookutil.IsClusterEncyptionEnabled() {
+		cSoPath := C.CString(paramtable.GetCipherParams().SoPathCpp.GetValue())
+		log.Info("Init PluginLoader", zap.String("soPath", paramtable.GetCipherParams().SoPathCpp.GetValue()))
+		defer C.free(unsafe.Pointer(cSoPath))
+		status := C.InitPluginLoader(cSoPath)
+		return HandleCStatus(&status, "InitPluginLoader failed")
+	}
+	return nil
+}
+
+func CleanPluginLoader() {
+	C.CleanPluginLoader()
+}
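Aside from the diff: the loader is meant to be initialized once per process, before any encrypted storage I/O, and released on shutdown. A minimal sketch under two assumptions: that these functions land in the initcore package imported earlier in this diff, and that startNode/stopNode stand in for the real (not shown) node lifecycle hooks:

package example

import "github.com/milvus-io/milvus/internal/util/initcore"

func startNode() error {
	// Load the CMEK cipher plugin (.so) before any encrypted binlog/index I/O happens.
	// A no-op when cluster encryption is disabled.
	return initcore.InitPluginLoader()
}

func stopNode() {
	// Release the plugin handle on shutdown.
	initcore.CleanPluginLoader()
}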
@ -5,6 +5,7 @@ package segcore

 #include "segcore/collection_c.h"
 #include "segcore/segment_c.h"
+#include "storage/storage_c.h"
 */
 import "C"

@ -12,9 +13,12 @@ import (
 	"unsafe"

 	"github.com/cockroachdb/errors"
+	"go.uber.org/zap"
 	"google.golang.org/protobuf/proto"

 	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
+	"github.com/milvus-io/milvus/internal/util/hookutil"
+	"github.com/milvus-io/milvus/pkg/v2/log"
 	"github.com/milvus-io/milvus/pkg/v2/proto/segcorepb"
 	"github.com/milvus-io/milvus/pkg/v2/util/merr"
 )
@ -114,3 +118,36 @@ func (c *CCollection) Release() {
 	C.DeleteCollection(c.ptr)
 	c.ptr = nil
 }
+
+func PutOrRefPluginContext(ez *hookutil.EZ, key string) error {
+	log.Info("PutOrRefPluginContext",
+		zap.Int64("ez_id", ez.EzID),
+		zap.Int64("collection_id", ez.CollectionID))
+	ckey := C.CString(key)
+	defer C.free(unsafe.Pointer(ckey))
+	pluginContext := C.CPluginContext{
+		ez_id:         C.int64_t(ez.EzID),
+		collection_id: C.int64_t(ez.CollectionID),
+		key:           ckey,
+	}
+	cstatus := C.PutOrRefPluginContext(pluginContext)
+	if err := ConsumeCStatusIntoError(&cstatus); err != nil {
+		return err
+	}
+	return nil
+}
+
+func UnRefPluginContext(ez *hookutil.EZ) error {
+	log.Info("UnRefPluginContext",
+		zap.Int64("ez_id", ez.EzID),
+		zap.Int64("collection_id", ez.CollectionID))
+	pluginContext := C.CPluginContext{
+		ez_id:         C.int64_t(ez.EzID),
+		collection_id: C.int64_t(ez.CollectionID),
+	}
+	cstatus := C.UnRefPluginContext(pluginContext)
+	if err := ConsumeCStatusIntoError(&cstatus); err != nil {
+		return err
+	}
+	return nil
+}
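Aside from the diff: the pair above ref-counts an encryption zone in the C++ cipher plugin around a collection's lifetime. A hedged sketch of the intended pattern; loadEncryptedCollection is an invented name and the real call sites are in the segment-loading path, which this hunk does not show:

package segcore

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/internal/util/hookutil"
)

// loadEncryptedCollection is illustrative only.
func loadEncryptedCollection(props []*commonpb.KeyValuePair, collectionID int64, key string) error {
	ez := hookutil.GetEzByCollProperties(props, collectionID)
	if ez == nil {
		return nil // not an encrypted collection
	}
	// Register the encryption zone with the C++ cipher plugin before segments open.
	if err := PutOrRefPluginContext(ez, key); err != nil {
		return err
	}
	// ... load segments ...
	// Drop the reference when the collection is released.
	return UnRefPluginContext(ez)
}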
@ -680,6 +680,7 @@ message CompactionPlan {
   IDRange pre_allocated_logIDs = 21;
   string json_params = 22;
   int32 current_scalar_index_version = 23;
+  repeated common.KeyValuePair plugin_context = 29;
 }

 message CompactionSegment {
@ -859,6 +860,7 @@ message PreImportRequest {
   repeated common.KeyValuePair options = 9;
   index.StorageConfig storage_config = 10;
   int64 task_slot = 11;
+  repeated common.KeyValuePair plugin_context = 12;
 }

 message IDRange {
@ -888,6 +890,7 @@ message ImportRequest {
   index.StorageConfig storage_config = 13;
   int64 task_slot = 14;
   int64 storage_version = 15;
+  repeated common.KeyValuePair plugin_context = 16;
 }

 message QueryPreImportRequest {
File diff suppressed because it is too large
@ -95,6 +95,13 @@ message BuildIndexInfo {
   int64 lack_binlog_rows = 23;
   int64 storage_version = 24;
   SegmentInsertFiles segment_insert_files = 25;
+  StoragePluginContext storage_plugin_context = 26;
+}
+
+message StoragePluginContext {
+  int64 encryption_zone_id = 1;
+  int64 collection_id = 2;
+  string encryption_key = 3;
 }

 message LoadTextIndexInfo {
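Aside from the diff: on the worker, this message ends up attached to BuildIndexInfo before the cgo CreateIndex call. A small sketch using the generated Go types shown below; the helper name and the argument values are invented, while the real population goes through hookutil.CreateLocalEZByPluginContext:

package example

import "github.com/milvus-io/milvus/pkg/v2/proto/indexcgopb"

// attachPluginContext is a sketch, not the commit's own wiring.
func attachPluginContext(info *indexcgopb.BuildIndexInfo, ezID, collectionID int64, edek string) {
	info.StoragePluginContext = &indexcgopb.StoragePluginContext{
		EncryptionZoneId: ezID,
		CollectionId:     collectionID,
		EncryptionKey:    edek,
	}
}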
@ -671,6 +671,7 @@ type BuildIndexInfo struct {
 	LackBinlogRows     int64               `protobuf:"varint,23,opt,name=lack_binlog_rows,json=lackBinlogRows,proto3" json:"lack_binlog_rows,omitempty"`
 	StorageVersion     int64               `protobuf:"varint,24,opt,name=storage_version,json=storageVersion,proto3" json:"storage_version,omitempty"`
 	SegmentInsertFiles *SegmentInsertFiles `protobuf:"bytes,25,opt,name=segment_insert_files,json=segmentInsertFiles,proto3" json:"segment_insert_files,omitempty"`
+	StoragePluginContext *StoragePluginContext `protobuf:"bytes,26,opt,name=storage_plugin_context,json=storagePluginContext,proto3" json:"storage_plugin_context,omitempty"`
 }

 func (x *BuildIndexInfo) Reset() {
@ -880,6 +881,76 @@ func (x *BuildIndexInfo) GetSegmentInsertFiles() *SegmentInsertFiles {
 	return nil
 }

+func (x *BuildIndexInfo) GetStoragePluginContext() *StoragePluginContext {
+	if x != nil {
+		return x.StoragePluginContext
+	}
+	return nil
+}
+
+type StoragePluginContext struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	EncryptionZoneId int64  `protobuf:"varint,1,opt,name=encryption_zone_id,json=encryptionZoneId,proto3" json:"encryption_zone_id,omitempty"`
+	CollectionId     int64  `protobuf:"varint,2,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"`
+	EncryptionKey    string `protobuf:"bytes,3,opt,name=encryption_key,json=encryptionKey,proto3" json:"encryption_key,omitempty"`
+}
+
+func (x *StoragePluginContext) Reset() {
+	*x = StoragePluginContext{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_index_cgo_msg_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StoragePluginContext) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StoragePluginContext) ProtoMessage() {}
+
+func (x *StoragePluginContext) ProtoReflect() protoreflect.Message {
+	mi := &file_index_cgo_msg_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StoragePluginContext.ProtoReflect.Descriptor instead.
+func (*StoragePluginContext) Descriptor() ([]byte, []int) {
+	return file_index_cgo_msg_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *StoragePluginContext) GetEncryptionZoneId() int64 {
+	if x != nil {
+		return x.EncryptionZoneId
+	}
+	return 0
+}
+
+func (x *StoragePluginContext) GetCollectionId() int64 {
+	if x != nil {
+		return x.CollectionId
+	}
+	return 0
+}
+
+func (x *StoragePluginContext) GetEncryptionKey() string {
+	if x != nil {
+		return x.EncryptionKey
+	}
+	return ""
+}
+
 type LoadTextIndexInfo struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@ -899,7 +970,7 @@ type LoadTextIndexInfo struct {
 func (x *LoadTextIndexInfo) Reset() {
 	*x = LoadTextIndexInfo{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_index_cgo_msg_proto_msgTypes[11]
+		mi := &file_index_cgo_msg_proto_msgTypes[12]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@ -912,7 +983,7 @@ func (x *LoadTextIndexInfo) String() string {
 func (*LoadTextIndexInfo) ProtoMessage() {}

 func (x *LoadTextIndexInfo) ProtoReflect() protoreflect.Message {
-	mi := &file_index_cgo_msg_proto_msgTypes[11]
+	mi := &file_index_cgo_msg_proto_msgTypes[12]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@ -925,7 +996,7 @@ func (x *LoadTextIndexInfo) ProtoReflect() protoreflect.Message {

 // Deprecated: Use LoadTextIndexInfo.ProtoReflect.Descriptor instead.
 func (*LoadTextIndexInfo) Descriptor() ([]byte, []int) {
-	return file_index_cgo_msg_proto_rawDescGZIP(), []int{11}
+	return file_index_cgo_msg_proto_rawDescGZIP(), []int{12}
 }

 func (x *LoadTextIndexInfo) GetFieldID() int64 {
@ -1010,7 +1081,7 @@ type LoadJsonKeyIndexInfo struct {
 func (x *LoadJsonKeyIndexInfo) Reset() {
 	*x = LoadJsonKeyIndexInfo{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_index_cgo_msg_proto_msgTypes[12]
+		mi := &file_index_cgo_msg_proto_msgTypes[13]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@ -1023,7 +1094,7 @@ func (x *LoadJsonKeyIndexInfo) String() string {
 func (*LoadJsonKeyIndexInfo) ProtoMessage() {}

 func (x *LoadJsonKeyIndexInfo) ProtoReflect() protoreflect.Message {
-	mi := &file_index_cgo_msg_proto_msgTypes[12]
+	mi := &file_index_cgo_msg_proto_msgTypes[13]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@ -1036,7 +1107,7 @@ func (x *LoadJsonKeyIndexInfo) ProtoReflect() protoreflect.Message {

 // Deprecated: Use LoadJsonKeyIndexInfo.ProtoReflect.Descriptor instead.
 func (*LoadJsonKeyIndexInfo) Descriptor() ([]byte, []int) {
-	return file_index_cgo_msg_proto_rawDescGZIP(), []int{12}
+	return file_index_cgo_msg_proto_rawDescGZIP(), []int{13}
 }

 func (x *LoadJsonKeyIndexInfo) GetFieldID() int64 {
@ -1191,7 +1262,7 @@ var file_index_cgo_msg_proto_rawDesc = []byte{
@ -1267,30 +1338,23 @@ var file_index_cgo_msg_proto_rawDesc = []byte{
@ -1312,11 +1376,33 @@ var file_index_cgo_msg_proto_rawDesc = []byte{
 [raw descriptor byte hunks not reproduced: the generated file_index_cgo_msg_proto_rawDesc array is regenerated to encode the new BuildIndexInfo.storage_plugin_context field and the StoragePluginContext message]
 }

 var (
@ -1331,7 +1417,7 @@ func file_index_cgo_msg_proto_rawDescGZIP() []byte {
 	return file_index_cgo_msg_proto_rawDescData
 }

-var file_index_cgo_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
+var file_index_cgo_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
 var file_index_cgo_msg_proto_goTypes = []interface{}{
 	(*TypeParams)(nil),  // 0: milvus.proto.indexcgo.TypeParams
 	(*IndexParams)(nil), // 1: milvus.proto.indexcgo.IndexParams
@ -1344,35 +1430,37 @@ var file_index_cgo_msg_proto_goTypes = []interface{}{
 	(*StorageConfig)(nil),     // 8: milvus.proto.indexcgo.StorageConfig
 	(*OptionalFieldInfo)(nil), // 9: milvus.proto.indexcgo.OptionalFieldInfo
 	(*BuildIndexInfo)(nil),    // 10: milvus.proto.indexcgo.BuildIndexInfo
-	(*LoadTextIndexInfo)(nil),     // 11: milvus.proto.indexcgo.LoadTextIndexInfo
-	(*LoadJsonKeyIndexInfo)(nil),  // 12: milvus.proto.indexcgo.LoadJsonKeyIndexInfo
-	nil,                           // 13: milvus.proto.indexcgo.MapParamsV2.ParamsEntry
-	(*commonpb.KeyValuePair)(nil), // 14: milvus.proto.common.KeyValuePair
-	(*schemapb.FieldSchema)(nil),  // 15: milvus.proto.schema.FieldSchema
-	(commonpb.LoadPriority)(0),    // 16: milvus.proto.common.LoadPriority
+	(*StoragePluginContext)(nil),  // 11: milvus.proto.indexcgo.StoragePluginContext
+	(*LoadTextIndexInfo)(nil),     // 12: milvus.proto.indexcgo.LoadTextIndexInfo
+	(*LoadJsonKeyIndexInfo)(nil),  // 13: milvus.proto.indexcgo.LoadJsonKeyIndexInfo
+	nil,                           // 14: milvus.proto.indexcgo.MapParamsV2.ParamsEntry
+	(*commonpb.KeyValuePair)(nil), // 15: milvus.proto.common.KeyValuePair
+	(*schemapb.FieldSchema)(nil),  // 16: milvus.proto.schema.FieldSchema
+	(commonpb.LoadPriority)(0),    // 17: milvus.proto.common.LoadPriority
 }
 var file_index_cgo_msg_proto_depIdxs = []int32{
-	14, // 0: milvus.proto.indexcgo.TypeParams.params:type_name -> milvus.proto.common.KeyValuePair
-	14, // 1: milvus.proto.indexcgo.IndexParams.params:type_name -> milvus.proto.common.KeyValuePair
-	14, // 2: milvus.proto.indexcgo.MapParams.params:type_name -> milvus.proto.common.KeyValuePair
-	13, // 3: milvus.proto.indexcgo.MapParamsV2.params:type_name -> milvus.proto.indexcgo.MapParamsV2.ParamsEntry
+	15, // 0: milvus.proto.indexcgo.TypeParams.params:type_name -> milvus.proto.common.KeyValuePair
+	15, // 1: milvus.proto.indexcgo.IndexParams.params:type_name -> milvus.proto.common.KeyValuePair
+	15, // 2: milvus.proto.indexcgo.MapParams.params:type_name -> milvus.proto.common.KeyValuePair
+	14, // 3: milvus.proto.indexcgo.MapParamsV2.params:type_name -> milvus.proto.indexcgo.MapParamsV2.ParamsEntry
 	4,  // 4: milvus.proto.indexcgo.BinarySet.datas:type_name -> milvus.proto.indexcgo.Binary
 	6,  // 5: milvus.proto.indexcgo.SegmentInsertFiles.field_insert_files:type_name -> milvus.proto.indexcgo.FieldInsertFiles
-	15, // 6: milvus.proto.indexcgo.BuildIndexInfo.field_schema:type_name -> milvus.proto.schema.FieldSchema
+	16, // 6: milvus.proto.indexcgo.BuildIndexInfo.field_schema:type_name -> milvus.proto.schema.FieldSchema
 	8,  // 7: milvus.proto.indexcgo.BuildIndexInfo.storage_config:type_name -> milvus.proto.indexcgo.StorageConfig
-	14, // 8: milvus.proto.indexcgo.BuildIndexInfo.index_params:type_name -> milvus.proto.common.KeyValuePair
-	14, // 9: milvus.proto.indexcgo.BuildIndexInfo.type_params:type_name -> milvus.proto.common.KeyValuePair
+	15, // 8: milvus.proto.indexcgo.BuildIndexInfo.index_params:type_name -> milvus.proto.common.KeyValuePair
+	15, // 9: milvus.proto.indexcgo.BuildIndexInfo.type_params:type_name -> milvus.proto.common.KeyValuePair
 	9,  // 10: milvus.proto.indexcgo.BuildIndexInfo.opt_fields:type_name -> milvus.proto.indexcgo.OptionalFieldInfo
 	7,  // 11: milvus.proto.indexcgo.BuildIndexInfo.segment_insert_files:type_name -> milvus.proto.indexcgo.SegmentInsertFiles
-	15, // 12: milvus.proto.indexcgo.LoadTextIndexInfo.schema:type_name -> milvus.proto.schema.FieldSchema
-	16, // 13: milvus.proto.indexcgo.LoadTextIndexInfo.load_priority:type_name -> milvus.proto.common.LoadPriority
-	15, // 14: milvus.proto.indexcgo.LoadJsonKeyIndexInfo.schema:type_name -> milvus.proto.schema.FieldSchema
-	16, // 15: milvus.proto.indexcgo.LoadJsonKeyIndexInfo.load_priority:type_name -> milvus.proto.common.LoadPriority
-	16, // [16:16] is the sub-list for method output_type
-	16, // [16:16] is the sub-list for method input_type
-	16, // [16:16] is the sub-list for extension type_name
-	16, // [16:16] is the sub-list for extension extendee
-	0,  // [0:16] is the sub-list for field type_name
+	11, // 12: milvus.proto.indexcgo.BuildIndexInfo.storage_plugin_context:type_name -> milvus.proto.indexcgo.StoragePluginContext
+	16, // 13: milvus.proto.indexcgo.LoadTextIndexInfo.schema:type_name -> milvus.proto.schema.FieldSchema
+	17, // 14: milvus.proto.indexcgo.LoadTextIndexInfo.load_priority:type_name -> milvus.proto.common.LoadPriority
+	16, // 15: milvus.proto.indexcgo.LoadJsonKeyIndexInfo.schema:type_name -> milvus.proto.schema.FieldSchema
+	17, // 16: milvus.proto.indexcgo.LoadJsonKeyIndexInfo.load_priority:type_name -> milvus.proto.common.LoadPriority
+	17, // [17:17] is the sub-list for method output_type
+	17, // [17:17] is the sub-list for method input_type
+	17, // [17:17] is the sub-list for extension type_name
+	17, // [17:17] is the sub-list for extension extendee
+	0,  // [0:17] is the sub-list for field type_name
 }

 func init() { file_index_cgo_msg_proto_init() }
@ -1514,7 +1602,7 @@ func file_index_cgo_msg_proto_init() {
 		}
 	}
 	file_index_cgo_msg_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*LoadTextIndexInfo); i {
+		switch v := v.(*StoragePluginContext); i {
 		case 0:
 			return &v.state
 		case 1:
@ -1526,6 +1614,18 @@ func file_index_cgo_msg_proto_init() {
 		}
 	}
 	file_index_cgo_msg_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+		switch v := v.(*LoadTextIndexInfo); i {
+		case 0:
+			return &v.state
+		case 1:
+			return &v.sizeCache
+		case 2:
+			return &v.unknownFields
+		default:
+			return nil
+		}
+	}
+	file_index_cgo_msg_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
 		switch v := v.(*LoadJsonKeyIndexInfo); i {
 		case 0:
 			return &v.state
@ -1544,7 +1644,7 @@ func file_index_cgo_msg_proto_init() {
 		GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 		RawDescriptor: file_index_cgo_msg_proto_rawDesc,
 		NumEnums:      0,
-		NumMessages:   14,
+		NumMessages:   15,
 		NumExtensions: 0,
 		NumServices:   0,
 	},
@ -103,6 +103,7 @@ message CreateJobRequest {
   int64 storage_version = 29;
   int64 lack_binlog_rows = 30;
   repeated data.FieldBinlog insert_logs = 31;
+  repeated common.KeyValuePair plugin_context = 32;
 }

 message QueryJobsRequest {
@ -155,6 +156,7 @@ message AnalyzeRequest {
   double max_cluster_size_ratio = 16;
   int64 max_cluster_size = 17;
   int64 task_slot = 18;
+  repeated common.KeyValuePair plugin_context = 19;
 }

 message CreateStatsRequest {
@ -189,6 +191,7 @@ message CreateStatsRequest {
   int64 task_slot = 24;
   int64 storage_version = 25;
   int32 current_scalar_index_version = 26;
+  repeated common.KeyValuePair plugin_context = 27;
 }

 message CreateJobV2Request {
File diff suppressed because it is too large
@ -1,6 +1,8 @@
 package message

 import (
+	"sync"
+
 	"github.com/milvus-io/milvus-proto/go-api/v2/hook"
 )

@ -8,15 +10,15 @@ import (
 // It should be initialized at initialization stage.
 var (
 	cipher   hook.Cipher
+	initOnce sync.Once
 )

 // RegisterCipher registers a cipher to be used for encrypting and decrypting messages.
 // It should be called only once when the program starts and initialization stage.
 func RegisterCipher(c hook.Cipher) {
-	if cipher != nil {
-		panic("cipher already registered")
-	}
+	initOnce.Do(func() {
 		cipher = c
+	})
 }

 // mustGetCipher returns the registered cipher.
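Aside from the diff: with sync.Once the registration becomes idempotent, so a second RegisterCipher call is silently ignored instead of panicking. A minimal sketch; registerOnce is an invented name:

package example

import (
	"github.com/milvus-io/milvus-proto/go-api/v2/hook"
	"github.com/milvus-io/milvus/pkg/v2/streaming/util/message"
)

// registerOnce illustrates the new semantics: only the first cipher wins.
// The hook.Cipher value itself comes from the CMEK plugin (hookutil.GetCipher in this commit).
func registerOnce(c hook.Cipher) {
	message.RegisterCipher(c) // registers c
	message.RegisterCipher(c) // no-op now, instead of panic("cipher already registered")
}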