Support partition Key (#24047)
Signed-off-by: xige-16 <xi.ge@zilliz.com>
This commit is contained in:
parent 697344e4bd
commit 732fe54775
239 internal/core/src/pb/plan.pb.cc (Executable file → Normal file)
@@ -60,6 +60,7 @@ PROTOBUF_CONSTEXPR ColumnInfo::ColumnInfo(
, /*decltype(_impl_.data_type_)*/0
, /*decltype(_impl_.is_primary_key_)*/false
, /*decltype(_impl_.is_autoid_)*/false
, /*decltype(_impl_.is_partition_key_)*/false
, /*decltype(_impl_._cached_size_)*/{}} {}
struct ColumnInfoDefaultTypeInternal {
PROTOBUF_CONSTEXPR ColumnInfoDefaultTypeInternal()
@@ -347,6 +348,7 @@ const uint32_t TableStruct_plan_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(pro
PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::ColumnInfo, _impl_.is_primary_key_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::ColumnInfo, _impl_.is_autoid_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::ColumnInfo, _impl_.nested_path_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::ColumnInfo, _impl_.is_partition_key_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::ColumnExpr, _internal_metadata_),
~0u, // no _extensions_
@@ -504,22 +506,22 @@ static const ::_pbi::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protode
{ 0, -1, -1, sizeof(::milvus::proto::plan::GenericValue)},
{ 11, -1, -1, sizeof(::milvus::proto::plan::QueryInfo)},
{ 21, -1, -1, sizeof(::milvus::proto::plan::ColumnInfo)},
{ 32, -1, -1, sizeof(::milvus::proto::plan::ColumnExpr)},
{ 39, -1, -1, sizeof(::milvus::proto::plan::ExistsExpr)},
{ 46, -1, -1, sizeof(::milvus::proto::plan::ValueExpr)},
{ 53, -1, -1, sizeof(::milvus::proto::plan::UnaryRangeExpr)},
{ 62, -1, -1, sizeof(::milvus::proto::plan::BinaryRangeExpr)},
{ 73, -1, -1, sizeof(::milvus::proto::plan::CompareExpr)},
{ 82, -1, -1, sizeof(::milvus::proto::plan::TermExpr)},
{ 90, -1, -1, sizeof(::milvus::proto::plan::UnaryExpr)},
{ 98, -1, -1, sizeof(::milvus::proto::plan::BinaryExpr)},
{ 107, -1, -1, sizeof(::milvus::proto::plan::BinaryArithOp)},
{ 116, -1, -1, sizeof(::milvus::proto::plan::BinaryArithExpr)},
{ 125, -1, -1, sizeof(::milvus::proto::plan::BinaryArithOpEvalRangeExpr)},
{ 136, -1, -1, sizeof(::milvus::proto::plan::Expr)},
{ 154, -1, -1, sizeof(::milvus::proto::plan::VectorANNS)},
{ 165, -1, -1, sizeof(::milvus::proto::plan::QueryPlanNode)},
{ 173, -1, -1, sizeof(::milvus::proto::plan::PlanNode)},
{ 33, -1, -1, sizeof(::milvus::proto::plan::ColumnExpr)},
{ 40, -1, -1, sizeof(::milvus::proto::plan::ExistsExpr)},
{ 47, -1, -1, sizeof(::milvus::proto::plan::ValueExpr)},
{ 54, -1, -1, sizeof(::milvus::proto::plan::UnaryRangeExpr)},
{ 63, -1, -1, sizeof(::milvus::proto::plan::BinaryRangeExpr)},
{ 74, -1, -1, sizeof(::milvus::proto::plan::CompareExpr)},
{ 83, -1, -1, sizeof(::milvus::proto::plan::TermExpr)},
{ 91, -1, -1, sizeof(::milvus::proto::plan::UnaryExpr)},
{ 99, -1, -1, sizeof(::milvus::proto::plan::BinaryExpr)},
{ 108, -1, -1, sizeof(::milvus::proto::plan::BinaryArithOp)},
{ 117, -1, -1, sizeof(::milvus::proto::plan::BinaryArithExpr)},
{ 126, -1, -1, sizeof(::milvus::proto::plan::BinaryArithOpEvalRangeExpr)},
{ 137, -1, -1, sizeof(::milvus::proto::plan::Expr)},
{ 155, -1, -1, sizeof(::milvus::proto::plan::VectorANNS)},
{ 166, -1, -1, sizeof(::milvus::proto::plan::QueryPlanNode)},
{ 174, -1, -1, sizeof(::milvus::proto::plan::PlanNode)},
};

static const ::_pb::Message* const file_default_instances[] = {
@@ -551,98 +553,99 @@ const char descriptor_table_protodef_plan_2eproto[] PROTOBUF_SECTION_VARIABLE(pr
"\001H\000\022\024\n\nstring_val\030\004 \001(\tH\000B\005\n\003val\"\\\n\tQuer"
"yInfo\022\014\n\004topk\030\001 \001(\003\022\023\n\013metric_type\030\003 \001(\t"
"\022\025\n\rsearch_params\030\004 \001(\t\022\025\n\rround_decimal"
"\030\005 \001(\003\"\220\001\n\nColumnInfo\022\020\n\010field_id\030\001 \001(\003\022"
"\030\005 \001(\003\"\252\001\n\nColumnInfo\022\020\n\010field_id\030\001 \001(\003\022"
"0\n\tdata_type\030\002 \001(\0162\035.milvus.proto.schema"
".DataType\022\026\n\016is_primary_key\030\003 \001(\010\022\021\n\tis_"
"autoID\030\004 \001(\010\022\023\n\013nested_path\030\005 \003(\t\"9\n\nCol"
"umnExpr\022+\n\004info\030\001 \001(\0132\035.milvus.proto.pla"
"n.ColumnInfo\"9\n\nExistsExpr\022+\n\004info\030\001 \001(\013"
"2\035.milvus.proto.plan.ColumnInfo\";\n\tValue"
"Expr\022.\n\005value\030\001 \001(\0132\037.milvus.proto.plan."
"GenericValue\"\233\001\n\016UnaryRangeExpr\0222\n\013colum"
"n_info\030\001 \001(\0132\035.milvus.proto.plan.ColumnI"
"nfo\022%\n\002op\030\002 \001(\0162\031.milvus.proto.plan.OpTy"
"pe\022.\n\005value\030\003 \001(\0132\037.milvus.proto.plan.Ge"
"nericValue\"\343\001\n\017BinaryRangeExpr\0222\n\013column"
"_info\030\001 \001(\0132\035.milvus.proto.plan.ColumnIn"
"fo\022\027\n\017lower_inclusive\030\002 \001(\010\022\027\n\017upper_inc"
"lusive\030\003 \001(\010\0224\n\013lower_value\030\004 \001(\0132\037.milv"
"us.proto.plan.GenericValue\0224\n\013upper_valu"
"e\030\005 \001(\0132\037.milvus.proto.plan.GenericValue"
"\"\247\001\n\013CompareExpr\0227\n\020left_column_info\030\001 \001"
"(\0132\035.milvus.proto.plan.ColumnInfo\0228\n\021rig"
"ht_column_info\030\002 \001(\0132\035.milvus.proto.plan"
".ColumnInfo\022%\n\002op\030\003 \001(\0162\031.milvus.proto.p"
"lan.OpType\"o\n\010TermExpr\0222\n\013column_info\030\001 "
"\001(\0132\035.milvus.proto.plan.ColumnInfo\022/\n\006va"
"lues\030\002 \003(\0132\037.milvus.proto.plan.GenericVa"
"lue\"\206\001\n\tUnaryExpr\0220\n\002op\030\001 \001(\0162$.milvus.p"
"roto.plan.UnaryExpr.UnaryOp\022&\n\005child\030\002 \001"
"(\0132\027.milvus.proto.plan.Expr\"\037\n\007UnaryOp\022\013"
"\n\007Invalid\020\000\022\007\n\003Not\020\001\"\307\001\n\nBinaryExpr\0222\n\002o"
"p\030\001 \001(\0162&.milvus.proto.plan.BinaryExpr.B"
"inaryOp\022%\n\004left\030\002 \001(\0132\027.milvus.proto.pla"
"n.Expr\022&\n\005right\030\003 \001(\0132\027.milvus.proto.pla"
"n.Expr\"6\n\010BinaryOp\022\013\n\007Invalid\020\000\022\016\n\nLogic"
"alAnd\020\001\022\r\n\tLogicalOr\020\002\"\255\001\n\rBinaryArithOp"
"\0222\n\013column_info\030\001 \001(\0132\035.milvus.proto.pla"
"n.ColumnInfo\0220\n\010arith_op\030\002 \001(\0162\036.milvus."
"proto.plan.ArithOpType\0226\n\rright_operand\030"
"\003 \001(\0132\037.milvus.proto.plan.GenericValue\"\214"
"\001\n\017BinaryArithExpr\022%\n\004left\030\001 \001(\0132\027.milvu"
"s.proto.plan.Expr\022&\n\005right\030\002 \001(\0132\027.milvu"
"s.proto.plan.Expr\022*\n\002op\030\003 \001(\0162\036.milvus.p"
"roto.plan.ArithOpType\"\221\002\n\032BinaryArithOpE"
"valRangeExpr\0222\n\013column_info\030\001 \001(\0132\035.milv"
"us.proto.plan.ColumnInfo\0220\n\010arith_op\030\002 \001"
"(\0162\036.milvus.proto.plan.ArithOpType\0226\n\rri"
"ght_operand\030\003 \001(\0132\037.milvus.proto.plan.Ge"
"nericValue\022%\n\002op\030\004 \001(\0162\031.milvus.proto.pl"
"an.OpType\022.\n\005value\030\005 \001(\0132\037.milvus.proto."
"plan.GenericValue\"\235\005\n\004Expr\0220\n\tterm_expr\030"
"\001 \001(\0132\033.milvus.proto.plan.TermExprH\000\0222\n\n"
"unary_expr\030\002 \001(\0132\034.milvus.proto.plan.Una"
"ryExprH\000\0224\n\013binary_expr\030\003 \001(\0132\035.milvus.p"
"roto.plan.BinaryExprH\000\0226\n\014compare_expr\030\004"
" \001(\0132\036.milvus.proto.plan.CompareExprH\000\022="
"\n\020unary_range_expr\030\005 \001(\0132!.milvus.proto."
"plan.UnaryRangeExprH\000\022\?\n\021binary_range_ex"
"pr\030\006 \001(\0132\".milvus.proto.plan.BinaryRange"
"ExprH\000\022X\n\037binary_arith_op_eval_range_exp"
"r\030\007 \001(\0132-.milvus.proto.plan.BinaryArithO"
"pEvalRangeExprH\000\022\?\n\021binary_arith_expr\030\010 "
"\001(\0132\".milvus.proto.plan.BinaryArithExprH"
"\000\0222\n\nvalue_expr\030\t \001(\0132\034.milvus.proto.pla"
"n.ValueExprH\000\0224\n\013column_expr\030\n \001(\0132\035.mil"
"vus.proto.plan.ColumnExprH\000\0224\n\013exists_ex"
"pr\030\013 \001(\0132\035.milvus.proto.plan.ExistsExprH"
"\000B\006\n\004expr\"\251\001\n\nVectorANNS\022\021\n\tis_binary\030\001 "
"\001(\010\022\020\n\010field_id\030\002 \001(\003\022+\n\npredicates\030\003 \001("
"\0132\027.milvus.proto.plan.Expr\0220\n\nquery_info"
"\030\004 \001(\0132\034.milvus.proto.plan.QueryInfo\022\027\n\017"
"placeholder_tag\030\005 \001(\t\"N\n\rQueryPlanNode\022+"
"\n\npredicates\030\001 \001(\0132\027.milvus.proto.plan.E"
"xpr\022\020\n\010is_count\030\002 \001(\010\"\304\001\n\010PlanNode\0224\n\013ve"
"ctor_anns\030\001 \001(\0132\035.milvus.proto.plan.Vect"
"orANNSH\000\022-\n\npredicates\030\002 \001(\0132\027.milvus.pr"
"oto.plan.ExprH\000\0221\n\005query\030\004 \001(\0132 .milvus."
"proto.plan.QueryPlanNodeH\000\022\030\n\020output_fie"
"ld_ids\030\003 \003(\003B\006\n\004node*\272\001\n\006OpType\022\013\n\007Inval"
"id\020\000\022\017\n\013GreaterThan\020\001\022\020\n\014GreaterEqual\020\002\022"
"\014\n\010LessThan\020\003\022\r\n\tLessEqual\020\004\022\t\n\005Equal\020\005\022"
"\014\n\010NotEqual\020\006\022\017\n\013PrefixMatch\020\007\022\020\n\014Postfi"
"xMatch\020\010\022\t\n\005Match\020\t\022\t\n\005Range\020\n\022\006\n\002In\020\013\022\t"
"\n\005NotIn\020\014*G\n\013ArithOpType\022\013\n\007Unknown\020\000\022\007\n"
"\003Add\020\001\022\007\n\003Sub\020\002\022\007\n\003Mul\020\003\022\007\n\003Div\020\004\022\007\n\003Mod"
"\020\005B3Z1github.com/milvus-io/milvus/intern"
"al/proto/planpbb\006proto3"
"autoID\030\004 \001(\010\022\023\n\013nested_path\030\005 \003(\t\022\030\n\020is_"
"partition_key\030\006 \001(\010\"9\n\nColumnExpr\022+\n\004inf"
"o\030\001 \001(\0132\035.milvus.proto.plan.ColumnInfo\"9"
"\n\nExistsExpr\022+\n\004info\030\001 \001(\0132\035.milvus.prot"
"o.plan.ColumnInfo\";\n\tValueExpr\022.\n\005value\030"
"\001 \001(\0132\037.milvus.proto.plan.GenericValue\"\233"
"\001\n\016UnaryRangeExpr\0222\n\013column_info\030\001 \001(\0132\035"
".milvus.proto.plan.ColumnInfo\022%\n\002op\030\002 \001("
"\0162\031.milvus.proto.plan.OpType\022.\n\005value\030\003 "
"\001(\0132\037.milvus.proto.plan.GenericValue\"\343\001\n"
"\017BinaryRangeExpr\0222\n\013column_info\030\001 \001(\0132\035."
"milvus.proto.plan.ColumnInfo\022\027\n\017lower_in"
"clusive\030\002 \001(\010\022\027\n\017upper_inclusive\030\003 \001(\010\0224"
"\n\013lower_value\030\004 \001(\0132\037.milvus.proto.plan."
"GenericValue\0224\n\013upper_value\030\005 \001(\0132\037.milv"
"us.proto.plan.GenericValue\"\247\001\n\013CompareEx"
"pr\0227\n\020left_column_info\030\001 \001(\0132\035.milvus.pr"
"oto.plan.ColumnInfo\0228\n\021right_column_info"
"\030\002 \001(\0132\035.milvus.proto.plan.ColumnInfo\022%\n"
"\002op\030\003 \001(\0162\031.milvus.proto.plan.OpType\"o\n\010"
"TermExpr\0222\n\013column_info\030\001 \001(\0132\035.milvus.p"
"roto.plan.ColumnInfo\022/\n\006values\030\002 \003(\0132\037.m"
"ilvus.proto.plan.GenericValue\"\206\001\n\tUnaryE"
"xpr\0220\n\002op\030\001 \001(\0162$.milvus.proto.plan.Unar"
"yExpr.UnaryOp\022&\n\005child\030\002 \001(\0132\027.milvus.pr"
"oto.plan.Expr\"\037\n\007UnaryOp\022\013\n\007Invalid\020\000\022\007\n"
"\003Not\020\001\"\307\001\n\nBinaryExpr\0222\n\002op\030\001 \001(\0162&.milv"
"us.proto.plan.BinaryExpr.BinaryOp\022%\n\004lef"
"t\030\002 \001(\0132\027.milvus.proto.plan.Expr\022&\n\005righ"
"t\030\003 \001(\0132\027.milvus.proto.plan.Expr\"6\n\010Bina"
"ryOp\022\013\n\007Invalid\020\000\022\016\n\nLogicalAnd\020\001\022\r\n\tLog"
"icalOr\020\002\"\255\001\n\rBinaryArithOp\0222\n\013column_inf"
"o\030\001 \001(\0132\035.milvus.proto.plan.ColumnInfo\0220"
"\n\010arith_op\030\002 \001(\0162\036.milvus.proto.plan.Ari"
"thOpType\0226\n\rright_operand\030\003 \001(\0132\037.milvus"
".proto.plan.GenericValue\"\214\001\n\017BinaryArith"
"Expr\022%\n\004left\030\001 \001(\0132\027.milvus.proto.plan.E"
"xpr\022&\n\005right\030\002 \001(\0132\027.milvus.proto.plan.E"
"xpr\022*\n\002op\030\003 \001(\0162\036.milvus.proto.plan.Arit"
"hOpType\"\221\002\n\032BinaryArithOpEvalRangeExpr\0222"
"\n\013column_info\030\001 \001(\0132\035.milvus.proto.plan."
"ColumnInfo\0220\n\010arith_op\030\002 \001(\0162\036.milvus.pr"
"oto.plan.ArithOpType\0226\n\rright_operand\030\003 "
"\001(\0132\037.milvus.proto.plan.GenericValue\022%\n\002"
"op\030\004 \001(\0162\031.milvus.proto.plan.OpType\022.\n\005v"
"alue\030\005 \001(\0132\037.milvus.proto.plan.GenericVa"
"lue\"\235\005\n\004Expr\0220\n\tterm_expr\030\001 \001(\0132\033.milvus"
".proto.plan.TermExprH\000\0222\n\nunary_expr\030\002 \001"
"(\0132\034.milvus.proto.plan.UnaryExprH\000\0224\n\013bi"
"nary_expr\030\003 \001(\0132\035.milvus.proto.plan.Bina"
"ryExprH\000\0226\n\014compare_expr\030\004 \001(\0132\036.milvus."
"proto.plan.CompareExprH\000\022=\n\020unary_range_"
"expr\030\005 \001(\0132!.milvus.proto.plan.UnaryRang"
"eExprH\000\022\?\n\021binary_range_expr\030\006 \001(\0132\".mil"
"vus.proto.plan.BinaryRangeExprH\000\022X\n\037bina"
"ry_arith_op_eval_range_expr\030\007 \001(\0132-.milv"
"us.proto.plan.BinaryArithOpEvalRangeExpr"
"H\000\022\?\n\021binary_arith_expr\030\010 \001(\0132\".milvus.p"
"roto.plan.BinaryArithExprH\000\0222\n\nvalue_exp"
"r\030\t \001(\0132\034.milvus.proto.plan.ValueExprH\000\022"
"4\n\013column_expr\030\n \001(\0132\035.milvus.proto.plan"
".ColumnExprH\000\0224\n\013exists_expr\030\013 \001(\0132\035.mil"
"vus.proto.plan.ExistsExprH\000B\006\n\004expr\"\251\001\n\n"
"VectorANNS\022\021\n\tis_binary\030\001 \001(\010\022\020\n\010field_i"
"d\030\002 \001(\003\022+\n\npredicates\030\003 \001(\0132\027.milvus.pro"
"to.plan.Expr\0220\n\nquery_info\030\004 \001(\0132\034.milvu"
"s.proto.plan.QueryInfo\022\027\n\017placeholder_ta"
"g\030\005 \001(\t\"N\n\rQueryPlanNode\022+\n\npredicates\030\001"
" \001(\0132\027.milvus.proto.plan.Expr\022\020\n\010is_coun"
"t\030\002 \001(\010\"\304\001\n\010PlanNode\0224\n\013vector_anns\030\001 \001("
"\0132\035.milvus.proto.plan.VectorANNSH\000\022-\n\npr"
"edicates\030\002 \001(\0132\027.milvus.proto.plan.ExprH"
"\000\0221\n\005query\030\004 \001(\0132 .milvus.proto.plan.Que"
"ryPlanNodeH\000\022\030\n\020output_field_ids\030\003 \003(\003B\006"
"\n\004node*\272\001\n\006OpType\022\013\n\007Invalid\020\000\022\017\n\013Greate"
"rThan\020\001\022\020\n\014GreaterEqual\020\002\022\014\n\010LessThan\020\003\022"
"\r\n\tLessEqual\020\004\022\t\n\005Equal\020\005\022\014\n\010NotEqual\020\006\022"
"\017\n\013PrefixMatch\020\007\022\020\n\014PostfixMatch\020\010\022\t\n\005Ma"
"tch\020\t\022\t\n\005Range\020\n\022\006\n\002In\020\013\022\t\n\005NotIn\020\014*G\n\013A"
"rithOpType\022\013\n\007Unknown\020\000\022\007\n\003Add\020\001\022\007\n\003Sub\020"
"\002\022\007\n\003Mul\020\003\022\007\n\003Div\020\004\022\007\n\003Mod\020\005B3Z1github.c"
"om/milvus-io/milvus/internal/proto/planp"
"bb\006proto3"
;
static const ::_pbi::DescriptorTable* const descriptor_table_plan_2eproto_deps[1] = {
&::descriptor_table_schema_2eproto,
};
static ::_pbi::once_flag descriptor_table_plan_2eproto_once;
const ::_pbi::DescriptorTable descriptor_table_plan_2eproto = {
false, false, 3623, descriptor_table_protodef_plan_2eproto,
false, false, 3649, descriptor_table_protodef_plan_2eproto,
"plan.proto",
&descriptor_table_plan_2eproto_once, descriptor_table_plan_2eproto_deps, 1, 19,
schemas, file_default_instances, TableStruct_plan_2eproto::offsets,
@@ -1401,12 +1404,13 @@ ColumnInfo::ColumnInfo(const ColumnInfo& from)
, decltype(_impl_.data_type_){}
, decltype(_impl_.is_primary_key_){}
, decltype(_impl_.is_autoid_){}
, decltype(_impl_.is_partition_key_){}
, /*decltype(_impl_._cached_size_)*/{}};

_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
::memcpy(&_impl_.field_id_, &from._impl_.field_id_,
static_cast<size_t>(reinterpret_cast<char*>(&_impl_.is_autoid_) -
reinterpret_cast<char*>(&_impl_.field_id_)) + sizeof(_impl_.is_autoid_));
static_cast<size_t>(reinterpret_cast<char*>(&_impl_.is_partition_key_) -
reinterpret_cast<char*>(&_impl_.field_id_)) + sizeof(_impl_.is_partition_key_));
// @@protoc_insertion_point(copy_constructor:milvus.proto.plan.ColumnInfo)
}

@@ -1420,6 +1424,7 @@ inline void ColumnInfo::SharedCtor(
, decltype(_impl_.data_type_){0}
, decltype(_impl_.is_primary_key_){false}
, decltype(_impl_.is_autoid_){false}
, decltype(_impl_.is_partition_key_){false}
, /*decltype(_impl_._cached_size_)*/{}
};
}
@@ -1450,8 +1455,8 @@ void ColumnInfo::Clear() {

_impl_.nested_path_.Clear();
::memset(&_impl_.field_id_, 0, static_cast<size_t>(
reinterpret_cast<char*>(&_impl_.is_autoid_) -
reinterpret_cast<char*>(&_impl_.field_id_)) + sizeof(_impl_.is_autoid_));
reinterpret_cast<char*>(&_impl_.is_partition_key_) -
reinterpret_cast<char*>(&_impl_.field_id_)) + sizeof(_impl_.is_partition_key_));
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
}

@@ -1509,6 +1514,14 @@ const char* ColumnInfo::_InternalParse(const char* ptr, ::_pbi::ParseContext* ct
} else
goto handle_unusual;
continue;
// bool is_partition_key = 6;
case 6:
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 48)) {
_impl_.is_partition_key_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint64(&ptr);
CHK_(ptr);
} else
goto handle_unusual;
continue;
default:
goto handle_unusual;
} // switch
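A note on the tag check in the parser hunk above: in the protobuf wire format, a field's key byte is the field number shifted left by three bits, OR'd with the wire type. is_partition_key is field number 6 encoded as a varint (wire type 0), so its key byte is (6 << 3) | 0 = 48. The same arithmetic explains the "total_size += 1 + 1" in ByteSizeLong further down: one tag byte plus one varint byte whenever the bool is set. A minimal Go sketch of the computation (names here are illustrative, not part of this commit):

package main

import "fmt"

// wireTag computes the protobuf key byte the generated parser matches on:
// the field number shifted left by three bits, OR'd with the wire type.
func wireTag(fieldNumber, wireType uint8) uint8 {
	return fieldNumber<<3 | wireType
}

func main() {
	const varint = 0 // wire type 0: varint, used for bool fields
	fmt.Println(wireTag(6, varint)) // prints 48, matching the case 6 branch above
}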
@@ -1573,6 +1586,12 @@ uint8_t* ColumnInfo::_InternalSerialize(
target = stream->WriteString(5, s, target);
}

// bool is_partition_key = 6;
if (this->_internal_is_partition_key() != 0) {
target = stream->EnsureSpace(target);
target = ::_pbi::WireFormatLite::WriteBoolToArray(6, this->_internal_is_partition_key(), target);
}

if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
target = ::_pbi::WireFormat::InternalSerializeUnknownFieldsToArray(
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
@@ -1618,6 +1637,11 @@ size_t ColumnInfo::ByteSizeLong() const {
total_size += 1 + 1;
}

// bool is_partition_key = 6;
if (this->_internal_is_partition_key() != 0) {
total_size += 1 + 1;
}

return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_);
}

@@ -1649,6 +1673,9 @@ void ColumnInfo::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PRO
if (from._internal_is_autoid() != 0) {
_this->_internal_set_is_autoid(from._internal_is_autoid());
}
if (from._internal_is_partition_key() != 0) {
_this->_internal_set_is_partition_key(from._internal_is_partition_key());
}
_this->_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
}

@@ -1668,8 +1695,8 @@ void ColumnInfo::InternalSwap(ColumnInfo* other) {
_internal_metadata_.InternalSwap(&other->_internal_metadata_);
_impl_.nested_path_.InternalSwap(&other->_impl_.nested_path_);
::PROTOBUF_NAMESPACE_ID::internal::memswap<
PROTOBUF_FIELD_OFFSET(ColumnInfo, _impl_.is_autoid_)
+ sizeof(ColumnInfo::_impl_.is_autoid_)
PROTOBUF_FIELD_OFFSET(ColumnInfo, _impl_.is_partition_key_)
+ sizeof(ColumnInfo::_impl_.is_partition_key_)
- PROTOBUF_FIELD_OFFSET(ColumnInfo, _impl_.field_id_)>(
reinterpret_cast<char*>(&_impl_.field_id_),
reinterpret_cast<char*>(&other->_impl_.field_id_));

@@ -794,6 +794,7 @@ class ColumnInfo final :
kDataTypeFieldNumber = 2,
kIsPrimaryKeyFieldNumber = 3,
kIsAutoIDFieldNumber = 4,
kIsPartitionKeyFieldNumber = 6,
};
// repeated string nested_path = 5;
int nested_path_size() const;
@@ -855,6 +856,15 @@ class ColumnInfo final :
void _internal_set_is_autoid(bool value);
public:

// bool is_partition_key = 6;
void clear_is_partition_key();
bool is_partition_key() const;
void set_is_partition_key(bool value);
private:
bool _internal_is_partition_key() const;
void _internal_set_is_partition_key(bool value);
public:

// @@protoc_insertion_point(class_scope:milvus.proto.plan.ColumnInfo)
private:
class _Internal;
@@ -868,6 +878,7 @@ class ColumnInfo final :
int data_type_;
bool is_primary_key_;
bool is_autoid_;
bool is_partition_key_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
};
union { Impl_ _impl_; };
@@ -4663,6 +4674,26 @@ ColumnInfo::mutable_nested_path() {
return &_impl_.nested_path_;
}

// bool is_partition_key = 6;
inline void ColumnInfo::clear_is_partition_key() {
_impl_.is_partition_key_ = false;
}
inline bool ColumnInfo::_internal_is_partition_key() const {
return _impl_.is_partition_key_;
}
inline bool ColumnInfo::is_partition_key() const {
// @@protoc_insertion_point(field_get:milvus.proto.plan.ColumnInfo.is_partition_key)
return _internal_is_partition_key();
}
inline void ColumnInfo::_internal_set_is_partition_key(bool value) {

_impl_.is_partition_key_ = value;
}
inline void ColumnInfo::set_is_partition_key(bool value) {
_internal_set_is_partition_key(value);
// @@protoc_insertion_point(field_set:milvus.proto.plan.ColumnInfo.is_partition_key)
}

// -------------------------------------------------------------------

// ColumnExpr

0 internal/core/src/pb/schema.pb.cc (Executable file → Normal file)
0 internal/core/src/pb/schema.pb.h (Executable file → Normal file)
@@ -18,6 +18,7 @@ type Field struct {
	AutoID         bool
	State          schemapb.FieldState
	IsDynamic      bool
	IsPartitionKey bool // partition key mode, multi logic partitions share a physical partition
	DefaultValue   *schemapb.ValueField
}

@@ -37,6 +38,7 @@ func (f Field) Clone() *Field {
		AutoID:         f.AutoID,
		State:          f.State,
		IsDynamic:      f.IsDynamic,
		IsPartitionKey: f.IsPartitionKey,
		DefaultValue:   f.DefaultValue,
	}
}
@@ -63,6 +65,7 @@ func (f Field) Equal(other Field) bool {
		checkParamsEqual(f.TypeParams, f.TypeParams) &&
		checkParamsEqual(f.IndexParams, other.IndexParams) &&
		f.AutoID == other.AutoID &&
		f.IsPartitionKey == other.IsPartitionKey &&
		f.IsDynamic == other.IsDynamic &&
		f.DefaultValue == other.DefaultValue
}
@@ -95,6 +98,7 @@ func MarshalFieldModel(field *Field) *schemapb.FieldSchema {
		IndexParams:    field.IndexParams,
		AutoID:         field.AutoID,
		IsDynamic:      field.IsDynamic,
		IsPartitionKey: field.IsPartitionKey,
		DefaultValue:   field.DefaultValue,
	}
}
@@ -126,6 +130,7 @@ func UnmarshalFieldModel(fieldSchema *schemapb.FieldSchema) *Field {
		IndexParams:    fieldSchema.IndexParams,
		AutoID:         fieldSchema.AutoID,
		IsDynamic:      fieldSchema.IsDynamic,
		IsPartitionKey: fieldSchema.IsPartitionKey,
		DefaultValue:   fieldSchema.DefaultValue,
	}
}

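The Field model above now threads IsPartitionKey through Clone, Equal, MarshalFieldModel, and UnmarshalFieldModel, so the flag survives a full round trip between the metastore model and the schema proto. A minimal sketch of that round trip; the package path of the Field model is not named in this diff, so internal/metastore/model below is an assumption:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus-proto/go-api/schemapb"
	// Package path assumed; the hunks above do not show the file location.
	"github.com/milvus-io/milvus/internal/metastore/model"
)

func main() {
	in := &schemapb.FieldSchema{
		Name:           "partition_key_field",
		DataType:       schemapb.DataType_Int64,
		IsPartitionKey: true,
	}
	// UnmarshalFieldModel and MarshalFieldModel (from the hunks above) copy
	// the flag in both directions.
	field := model.UnmarshalFieldModel(in)
	out := model.MarshalFieldModel(field)
	fmt.Println(out.IsPartitionKey) // true
}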
@@ -48,6 +48,7 @@ func (v *ParserVisitor) translateIdentifier(identifier string) (*ExprWithType, e
				IsPrimaryKey:   field.IsPrimaryKey,
				IsAutoID:       field.AutoID,
				NestedPath:     nestedPath,
				IsPartitionKey: field.IsPartitionKey,
			},
		},
	},

@@ -51,6 +51,7 @@ message ColumnInfo {
  bool is_primary_key = 3;
  bool is_autoID = 4;
  repeated string nested_path = 5;
  bool is_partition_key = 6;
}

message ColumnExpr {

@@ -348,6 +348,7 @@ type ColumnInfo struct {
	IsPrimaryKey         bool     `protobuf:"varint,3,opt,name=is_primary_key,json=isPrimaryKey,proto3" json:"is_primary_key,omitempty"`
	IsAutoID             bool     `protobuf:"varint,4,opt,name=is_autoID,json=isAutoID,proto3" json:"is_autoID,omitempty"`
	NestedPath           []string `protobuf:"bytes,5,rep,name=nested_path,json=nestedPath,proto3" json:"nested_path,omitempty"`
	IsPartitionKey       bool     `protobuf:"varint,6,opt,name=is_partition_key,json=isPartitionKey,proto3" json:"is_partition_key,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
@@ -413,6 +414,13 @@ func (m *ColumnInfo) GetNestedPath() []string {
	return nil
}

func (m *ColumnInfo) GetIsPartitionKey() bool {
	if m != nil {
		return m.IsPartitionKey
	}
	return false
}

type ColumnExpr struct {
	Info                 *ColumnInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
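A small sketch using the generated planpb type above: mark a column as the partition key, the way translateIdentifier now does when it builds a ColumnInfo, and read it back through the nil-safe getter.

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/proto/planpb"
)

func main() {
	col := &planpb.ColumnInfo{IsPartitionKey: true}
	fmt.Println(col.GetIsPartitionKey()) // true

	// The generated getter checks for nil, matching GetIsPartitionKey above.
	var none *planpb.ColumnInfo
	fmt.Println(none.GetIsPartitionKey()) // false
}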
@@ -1506,99 +1514,101 @@ func init() {
func init() { proto.RegisterFile("plan.proto", fileDescriptor_2d655ab2f7683c23) }

var fileDescriptor_2d655ab2f7683c23 = []byte{
	// 1504 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0xcd, 0x72, 0xdc, 0x36,
	0x12, 0x1e, 0xce, 0x2f, 0xd9, 0x33, 0x1a, 0xd1, 0xbc, 0xac, 0x6c, 0xaf, 0x2d, 0x2d, 0xd7, 0xb5,
	0xd6, 0x7a, 0xcb, 0x52, 0x79, 0xed, 0xb5, 0xd7, 0xde, 0xf2, 0x46, 0xbf, 0xd1, 0x4c, 0xc5, 0x96,
	0x14, 0x5a, 0xd6, 0x21, 0x17, 0x16, 0x86, 0x84, 0x34, 0x28, 0x73, 0x00, 0x9a, 0x04, 0xc7, 0xd2,
	0x39, 0xb7, 0xdc, 0xf2, 0x00, 0x39, 0xe7, 0x9e, 0x63, 0x2a, 0x55, 0x39, 0xe6, 0x92, 0x43, 0x8e,
	0xb9, 0xe7, 0x11, 0xf2, 0x02, 0x29, 0x34, 0x38, 0x7f, 0xaa, 0x19, 0x6b, 0x94, 0xb8, 0x2a, 0x37,
	0xa0, 0x81, 0xfe, 0xd0, 0xfd, 0x75, 0xa3, 0xd1, 0x00, 0x88, 0x23, 0xc2, 0xd7, 0xe2, 0x44, 0x48,
	0xe1, 0x5c, 0xeb, 0xb1, 0xa8, 0x9f, 0xa5, 0x7a, 0xb6, 0xa6, 0x16, 0x6e, 0x34, 0xd2, 0xa0, 0x4b,
	0x7b, 0x44, 0x8b, 0xdc, 0x2f, 0x0d, 0x68, 0xec, 0x51, 0x4e, 0x13, 0x16, 0x1c, 0x93, 0x28, 0xa3,
	0xce, 0x4d, 0x30, 0x3b, 0x42, 0x44, 0x7e, 0x9f, 0x44, 0x4b, 0xc6, 0x8a, 0xb1, 0x6a, 0xb6, 0x0a,
	0x5e, 0x4d, 0x49, 0x8e, 0x49, 0xe4, 0xdc, 0x02, 0x8b, 0x71, 0xf9, 0xf8, 0x11, 0xae, 0x16, 0x57,
	0x8c, 0xd5, 0x52, 0xab, 0xe0, 0x99, 0x28, 0xca, 0x97, 0x4f, 0x22, 0x41, 0x24, 0x2e, 0x97, 0x56,
	0x8c, 0x55, 0x43, 0x2d, 0xa3, 0x48, 0x2d, 0x2f, 0x03, 0xa4, 0x32, 0x61, 0xfc, 0x14, 0xd7, 0xcb,
	0x2b, 0xc6, 0xaa, 0xd5, 0x2a, 0x78, 0x96, 0x96, 0x1d, 0x93, 0x68, 0xab, 0x02, 0xa5, 0x3e, 0x89,
	0xdc, 0x2f, 0x0c, 0xb0, 0x3e, 0xcd, 0x68, 0x72, 0xde, 0xe6, 0x27, 0xc2, 0x71, 0xa0, 0x2c, 0x45,
	0xfc, 0x06, 0x8d, 0x29, 0x79, 0x38, 0x76, 0x96, 0xa1, 0xde, 0xa3, 0x32, 0x61, 0x81, 0x2f, 0xcf,
	0x63, 0x8a, 0x47, 0x59, 0x1e, 0x68, 0xd1, 0xd1, 0x79, 0x4c, 0x9d, 0xbf, 0xc3, 0x42, 0x4a, 0x49,
	0x12, 0x74, 0xfd, 0x98, 0x24, 0xa4, 0x97, 0xea, 0xd3, 0xbc, 0x86, 0x16, 0x1e, 0xa2, 0x4c, 0x6d,
	0x4a, 0x44, 0xc6, 0x43, 0x3f, 0xa4, 0x01, 0xeb, 0x91, 0x68, 0xa9, 0x82, 0x47, 0x34, 0x50, 0xb8,
	0xa3, 0x65, 0xee, 0x0f, 0x06, 0xc0, 0xb6, 0x88, 0xb2, 0x1e, 0x47, 0x6b, 0xae, 0x83, 0x79, 0xc2,
	0x68, 0x14, 0xfa, 0x2c, 0xcc, 0x2d, 0xaa, 0xe1, 0xbc, 0x1d, 0x3a, 0xcf, 0xc0, 0x0a, 0x89, 0x24,
	0xda, 0x24, 0x45, 0x4e, 0xf3, 0xdf, 0xb7, 0xd6, 0x26, 0xf8, 0xcf, 0x99, 0xdf, 0x21, 0x92, 0x28,
	0x2b, 0x3d, 0x33, 0xcc, 0x47, 0xce, 0x1d, 0x68, 0xb2, 0xd4, 0x8f, 0x13, 0xd6, 0x23, 0xc9, 0xb9,
	0xff, 0x86, 0x9e, 0xa3, 0x4f, 0xa6, 0xd7, 0x60, 0xe9, 0xa1, 0x16, 0x7e, 0x42, 0xcf, 0x9d, 0x9b,
	0x60, 0xb1, 0xd4, 0x27, 0x99, 0x14, 0xed, 0x1d, 0xf4, 0xc8, 0xf4, 0x4c, 0x96, 0x6e, 0xe2, 0x5c,
	0x71, 0xc2, 0x69, 0x2a, 0x69, 0xe8, 0xc7, 0x44, 0x76, 0x97, 0x2a, 0x2b, 0x25, 0xc5, 0x89, 0x16,
	0x1d, 0x12, 0xd9, 0x75, 0x3f, 0x1a, 0x38, 0xb2, 0x7b, 0x16, 0x27, 0xce, 0x03, 0x28, 0x33, 0x7e,
	0x22, 0xd0, 0x89, 0xfa, 0x45, 0x43, 0x31, 0x83, 0x46, 0x5e, 0x7b, 0xb8, 0x55, 0x01, 0xec, 0x9e,
	0xb1, 0x54, 0xa6, 0xbf, 0x17, 0x60, 0x0b, 0x2c, 0x4c, 0x32, 0xd4, 0xff, 0x0f, 0x54, 0xfa, 0x6a,
	0x92, 0x03, 0x2c, 0x4f, 0x01, 0x18, 0x4f, 0x4c, 0x4f, 0xef, 0x76, 0xbf, 0x31, 0xa0, 0xf9, 0x9a,
	0x93, 0xe4, 0xdc, 0x23, 0xfc, 0x54, 0x23, 0xfd, 0x1f, 0xea, 0x01, 0x1e, 0xe5, 0xcf, 0x6f, 0x10,
	0x04, 0xa3, 0x98, 0xfe, 0x13, 0x8a, 0x22, 0xce, 0x23, 0x76, 0x7d, 0x8a, 0xda, 0x41, 0x8c, 0xd1,
	0x2a, 0x8a, 0x78, 0x64, 0x74, 0xe9, 0x4a, 0x46, 0x7f, 0x5d, 0x84, 0xc5, 0x2d, 0xf6, 0x61, 0xad,
	0xbe, 0x0b, 0x8b, 0x91, 0x78, 0x47, 0x13, 0x9f, 0xf1, 0x20, 0xca, 0x52, 0xd6, 0xd7, 0x49, 0x67,
	0x7a, 0x4d, 0x14, 0xb7, 0x07, 0x52, 0xb5, 0x31, 0x8b, 0xe3, 0x89, 0x8d, 0x3a, 0xb9, 0x9a, 0x28,
	0x1e, 0x6d, 0xdc, 0x80, 0xba, 0x46, 0xd4, 0x2e, 0x96, 0xe7, 0x73, 0x11, 0x50, 0x47, 0x17, 0x8f,
	0x0d, 0xa8, 0xeb, 0xa3, 0x34, 0x42, 0x65, 0x4e, 0x04, 0xd4, 0xc1, 0xb1, 0xfb, 0xa3, 0x01, 0xf5,
	0x6d, 0xd1, 0x8b, 0x49, 0xa2, 0x59, 0xda, 0x03, 0x3b, 0xa2, 0x27, 0xd2, 0xbf, 0x32, 0x55, 0x4d,
	0xa5, 0x36, 0x76, 0x71, 0xdb, 0x70, 0x2d, 0x61, 0xa7, 0xdd, 0x49, 0xa4, 0xe2, 0x3c, 0x48, 0x8b,
	0xa8, 0xb7, 0x7d, 0x31, 0x5f, 0x4a, 0x73, 0xe4, 0x8b, 0xfb, 0xb9, 0x01, 0xe6, 0x11, 0x4d, 0x7a,
	0x1f, 0x24, 0xe2, 0x4f, 0xa0, 0x8a, 0xbc, 0xa6, 0x4b, 0xc5, 0x95, 0xd2, 0x3c, 0xc4, 0xe6, 0xdb,
	0x55, 0x91, 0xb7, 0xf0, 0xce, 0xa0, 0x19, 0x8f, 0xd0, 0x7c, 0x03, 0xcd, 0xbf, 0x33, 0x05, 0x62,
	0xb8, 0x53, 0x8f, 0x0e, 0x62, 0xcc, 0xfc, 0xfb, 0x50, 0x09, 0xba, 0x2c, 0x0a, 0x73, 0xce, 0xfe,
	0x32, 0x45, 0x51, 0xe9, 0x78, 0x7a, 0x97, 0xbb, 0x0c, 0xb5, 0x5c, 0xdb, 0xa9, 0x43, 0xad, 0xcd,
	0xfb, 0x24, 0x62, 0xa1, 0x5d, 0x70, 0x6a, 0x50, 0xda, 0x17, 0xd2, 0x36, 0xdc, 0x9f, 0x0d, 0x00,
	0x7d, 0x25, 0xd0, 0xa8, 0xc7, 0x63, 0x46, 0xfd, 0x63, 0x0a, 0xf6, 0x68, 0x6b, 0x3e, 0xcc, 0xcd,
	0xfa, 0x17, 0x94, 0x55, 0xa0, 0x2f, 0xb3, 0x0a, 0x37, 0x29, 0x1f, 0x30, 0x96, 0xf9, 0xed, 0x9d,
	0xed, 0x03, 0xee, 0x72, 0x1f, 0x83, 0x39, 0x38, 0x6b, 0xd2, 0x89, 0x26, 0xc0, 0x0b, 0x71, 0xca,
	0x02, 0x12, 0x6d, 0xf2, 0xd0, 0x36, 0x9c, 0x05, 0xb0, 0xf2, 0xf9, 0x41, 0x62, 0x17, 0xdd, 0x9f,
	0x0c, 0x58, 0xd0, 0x8a, 0x9b, 0x09, 0x93, 0xdd, 0x83, 0xf8, 0x0f, 0x47, 0xfe, 0x29, 0x98, 0x44,
	0x41, 0xf9, 0xc3, 0x3a, 0x75, 0x7b, 0x8a, 0x72, 0x7e, 0x1a, 0x26, 0x5f, 0x8d, 0xe4, 0x47, 0xef,
	0xc0, 0x82, 0xce, 0x7b, 0x11, 0xd3, 0x84, 0xf0, 0x70, 0xde, 0xca, 0xd5, 0x40, 0xad, 0x03, 0xad,
	0xe4, 0x7e, 0x65, 0x0c, 0x0a, 0x18, 0x1e, 0x82, 0x21, 0x1b, 0x50, 0x6f, 0x5c, 0x89, 0xfa, 0xe2,
	0x3c, 0xd4, 0x3b, 0x6b, 0x63, 0x57, 0xec, 0x32, 0x57, 0xd5, 0x3d, 0xfb, 0xbe, 0x08, 0x37, 0x26,
	0x28, 0xdf, 0xed, 0x93, 0xe8, 0xc3, 0xd5, 0xda, 0x3f, 0x9b, 0xff, 0xbc, 0xe4, 0x94, 0xaf, 0xf4,
	0x44, 0x55, 0xae, 0xf4, 0x44, 0x7d, 0x57, 0x85, 0x32, 0x72, 0xf5, 0x0c, 0x2c, 0x49, 0x93, 0x9e,
	0x4f, 0xcf, 0xe2, 0x24, 0x67, 0xea, 0xe6, 0x14, 0x8c, 0x41, 0x55, 0x53, 0x1d, 0x9e, 0x1c, 0x54,
	0xb8, 0xe7, 0x00, 0x99, 0x0a, 0x82, 0x56, 0xd6, 0xa1, 0xfe, 0xeb, 0xfb, 0x4a, 0x8c, 0xea, 0xff,
	0xb2, 0x61, 0x11, 0xd8, 0x80, 0x7a, 0x87, 0x8d, 0xf4, 0x4b, 0x33, 0xc3, 0x34, 0xaa, 0x06, 0xad,
	0x82, 0x07, 0x9d, 0x51, 0x19, 0xd9, 0x86, 0x46, 0xa0, 0x5f, 0x0f, 0x0d, 0xa1, 0xdf, 0xb0, 0xdb,
	0x53, 0x23, 0x3d, 0x7c, 0x64, 0x5a, 0x05, 0xaf, 0x1e, 0x8c, 0xbd, 0x39, 0x2f, 0xc1, 0xd6, 0x5e,
	0x24, 0x2a, 0x81, 0x34, 0x90, 0x26, 0xf3, 0x6f, 0xb3, 0x7c, 0x19, 0xa6, 0x5a, 0xab, 0xe0, 0x35,
	0xb3, 0xc9, 0x87, 0xfe, 0x10, 0xae, 0xe5, 0x5e, 0x8d, 0xe1, 0x55, 0x11, 0xcf, 0x9d, 0xe9, 0xdb,
	0x38, 0xe0, 0x62, 0xe7, 0x42, 0xeb, 0x20, 0x61, 0x39, 0x47, 0x1c, 0x64, 0xa5, 0x4f, 0xfb, 0x24,
	0x1a, 0xc7, 0xaf, 0x21, 0xfe, 0xfd, 0x99, 0xf8, 0xd3, 0xae, 0x49, 0xab, 0xe0, 0xdd, 0xe8, 0xcc,
	0xbe, 0x44, 0x23, 0x3f, 0xf4, 0xa9, 0x78, 0x8e, 0x79, 0x89, 0x1f, 0xc3, 0x72, 0x31, 0xf2, 0x63,
	0x54, 0x41, 0x9e, 0x03, 0x60, 0xf2, 0x69, 0x28, 0x6b, 0x66, 0xba, 0x0c, 0x9b, 0x46, 0x95, 0x2e,
	0xfd, 0x61, 0x07, 0xb9, 0x31, 0xbc, 0xd5, 0xa8, 0x0f, 0x97, 0xdc, 0xea, 0x41, 0xba, 0x04, 0xa3,
	0x26, 0x78, 0x03, 0xea, 0x14, 0x3b, 0x5a, 0x8d, 0x50, 0x9f, 0x89, 0x30, 0xea, 0x7b, 0x15, 0x02,
	0x1d, 0xce, 0xb6, 0xaa, 0x50, 0x56, 0xaa, 0xee, 0x2f, 0x06, 0xc0, 0x31, 0x0d, 0xa4, 0x48, 0x36,
	0xf7, 0xf7, 0x5f, 0xe5, 0x9d, 0xba, 0xf6, 0x57, 0x7f, 0xa3, 0x54, 0xa7, 0xae, 0x29, 0x99, 0xf8,
	0x43, 0x14, 0x27, 0xff, 0x10, 0x4f, 0x00, 0xe2, 0x84, 0x86, 0x2c, 0x20, 0x92, 0xa6, 0x97, 0x3d,
	0x53, 0x63, 0x5b, 0x9d, 0xff, 0x01, 0xbc, 0x55, 0x5f, 0x26, 0x5d, 0xe0, 0xca, 0x33, 0xa9, 0x1c,
	0xfe, 0xab, 0x3c, 0xeb, 0xed, 0xf0, 0x8b, 0x75, 0x17, 0x16, 0xe3, 0x88, 0x04, 0xb4, 0x2b, 0xa2,
	0x90, 0x26, 0xbe, 0x24, 0xa7, 0x98, 0xef, 0x96, 0xd7, 0x1c, 0x13, 0x1f, 0x91, 0x53, 0x37, 0x80,
	0x05, 0x04, 0x38, 0x8c, 0x08, 0xdf, 0x17, 0x21, 0xbd, 0x60, 0xaf, 0x31, 0xbf, 0xbd, 0xd7, 0xc1,
	0x64, 0xa9, 0x1f, 0x88, 0x8c, 0xcb, 0xbc, 0x6d, 0xad, 0xb1, 0x74, 0x5b, 0x4d, 0xdd, 0x5f, 0x0d,
	0x30, 0x87, 0x07, 0x6c, 0x40, 0xbd, 0x8f, 0xb4, 0xfa, 0x84, 0xf3, 0xf4, 0x3d, 0x95, 0x7b, 0x44,
	0xbe, 0x8a, 0x90, 0xd6, 0xd9, 0xe4, 0x3c, 0x75, 0x9e, 0x4e, 0x98, 0xf8, 0xfe, 0xe7, 0x47, 0xa9,
	0x8e, 0x19, 0xf9, 0x5f, 0xa8, 0x20, 0x49, 0x39, 0x9f, 0x2b, 0xb3, 0xf8, 0x1c, 0x58, 0xdb, 0x2a,
	0x78, 0x5a, 0xc1, 0x59, 0x05, 0x5b, 0x64, 0x32, 0xce, 0xa4, 0x3f, 0x88, 0xb4, 0x8a, 0x66, 0x69,
	0xb5, 0xe4, 0x35, 0xb5, 0xfc, 0x63, 0x1d, 0xf0, 0x54, 0x25, 0x10, 0x17, 0x21, 0xbd, 0xf7, 0xad,
	0x01, 0x55, 0x5d, 0xc5, 0x27, 0x7b, 0x8d, 0x45, 0xa8, 0xef, 0x25, 0x94, 0x48, 0x9a, 0x1c, 0x75,
	0x09, 0xb7, 0x0d, 0xc7, 0x86, 0x46, 0x2e, 0xd8, 0x7d, 0x9b, 0x91, 0xc8, 0x2e, 0x3a, 0x0d, 0x30,
	0x5f, 0xd0, 0x34, 0xc5, 0xf5, 0x12, 0x36, 0x23, 0x34, 0x4d, 0xf5, 0x62, 0xd9, 0xb1, 0xa0, 0xa2,
	0x87, 0x15, 0xb5, 0x6f, 0x5f, 0x48, 0x3d, 0xab, 0x2a, 0xe0, 0xc3, 0x84, 0x9e, 0xb0, 0xb3, 0x97,
	0x44, 0x06, 0x5d, 0xbb, 0xa6, 0x80, 0x0f, 0x45, 0x2a, 0x87, 0x12, 0x53, 0xe9, 0xea, 0xa1, 0xa5,
	0x86, 0x58, 0x09, 0x6c, 0x70, 0xaa, 0x50, 0x6c, 0x73, 0xbb, 0xae, 0x44, 0xfb, 0x42, 0xb6, 0xb9,
	0xdd, 0xb8, 0xb7, 0x07, 0xf5, 0xb1, 0xc7, 0x4f, 0x39, 0xf0, 0x9a, 0xbf, 0xe1, 0xe2, 0x1d, 0xd7,
	0x1d, 0xdf, 0x66, 0xa8, 0xba, 0xa4, 0x1a, 0x94, 0x5e, 0x65, 0x1d, 0xbb, 0xa8, 0x06, 0x2f, 0xb3,
	0xc8, 0x2e, 0xa9, 0xc1, 0x0e, 0xeb, 0xdb, 0x65, 0x94, 0x88, 0xd0, 0xae, 0x6c, 0x3d, 0xfc, 0xec,
	0xc1, 0x29, 0x93, 0xdd, 0xac, 0xb3, 0x16, 0x88, 0xde, 0xba, 0xa6, 0xfb, 0x3e, 0x13, 0xf9, 0x68,
	0x9d, 0x71, 0x49, 0x13, 0x4e, 0xa2, 0x75, 0x8c, 0xc0, 0xba, 0x8a, 0x40, 0xdc, 0xe9, 0x54, 0x71,
	0xf6, 0xf0, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x92, 0xa4, 0xfa, 0xf9, 0x10, 0x00, 0x00,
	// 1523 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0xcd, 0x6e, 0xdb, 0xca,
	0x15, 0x16, 0xf5, 0x4b, 0x1e, 0xc9, 0x32, 0xc3, 0x4d, 0x9d, 0xa4, 0x89, 0x5d, 0x36, 0x68, 0xdc,
	0x14, 0xb1, 0x91, 0x26, 0x4d, 0x9a, 0x14, 0x69, 0xfd, 0x5b, 0x4b, 0x48, 0x62, 0xab, 0x8c, 0xe3,
	0x45, 0x37, 0xc4, 0x88, 0x1c, 0x5b, 0x83, 0x50, 0x33, 0x0c, 0x39, 0x54, 0xac, 0x75, 0x77, 0xdd,
	0xf5, 0x01, 0xba, 0xee, 0xbe, 0xcb, 0xa2, 0x40, 0x5f, 0xa0, 0x8b, 0x2e, 0xbb, 0xef, 0x13, 0x14,
	0xf7, 0x05, 0x2e, 0xe6, 0x0c, 0xf5, 0x67, 0x48, 0xb1, 0x7c, 0x6f, 0x80, 0xbb, 0x9b, 0x39, 0x73,
	0xce, 0x77, 0x7e, 0xe7, 0xcc, 0x19, 0x80, 0x38, 0x22, 0x7c, 0x2b, 0x4e, 0x84, 0x14, 0xce, 0xad,
	0x3e, 0x8b, 0x06, 0x59, 0xaa, 0x77, 0x5b, 0xea, 0xe0, 0x4e, 0x23, 0x0d, 0x7a, 0xb4, 0x4f, 0x34,
	0xc9, 0xfd, 0x8b, 0x01, 0x8d, 0x23, 0xca, 0x69, 0xc2, 0x82, 0x33, 0x12, 0x65, 0xd4, 0xb9, 0x0b,
	0x66, 0x57, 0x88, 0xc8, 0x1f, 0x90, 0x68, 0xcd, 0xd8, 0x30, 0x36, 0xcd, 0x56, 0xc1, 0xab, 0x29,
	0xca, 0x19, 0x89, 0x9c, 0x7b, 0x60, 0x31, 0x2e, 0x9f, 0x3f, 0xc3, 0xd3, 0xe2, 0x86, 0xb1, 0x59,
	0x6a, 0x15, 0x3c, 0x13, 0x49, 0xf9, 0xf1, 0x79, 0x24, 0x88, 0xc4, 0xe3, 0xd2, 0x86, 0xb1, 0x69,
	0xa8, 0x63, 0x24, 0xa9, 0xe3, 0x75, 0x80, 0x54, 0x26, 0x8c, 0x5f, 0xe0, 0x79, 0x79, 0xc3, 0xd8,
	0xb4, 0x5a, 0x05, 0xcf, 0xd2, 0xb4, 0x33, 0x12, 0xed, 0x55, 0xa0, 0x34, 0x20, 0x91, 0xfb, 0x67,
	0x03, 0xac, 0x3f, 0x64, 0x34, 0x19, 0xb6, 0xf9, 0xb9, 0x70, 0x1c, 0x28, 0x4b, 0x11, 0x7f, 0x44,
	0x63, 0x4a, 0x1e, 0xae, 0x9d, 0x75, 0xa8, 0xf7, 0xa9, 0x4c, 0x58, 0xe0, 0xcb, 0x61, 0x4c, 0x51,
	0x95, 0xe5, 0x81, 0x26, 0x9d, 0x0e, 0x63, 0xea, 0xfc, 0x14, 0x56, 0x52, 0x4a, 0x92, 0xa0, 0xe7,
	0xc7, 0x24, 0x21, 0xfd, 0x54, 0x6b, 0xf3, 0x1a, 0x9a, 0xd8, 0x41, 0x9a, 0x62, 0x4a, 0x44, 0xc6,
	0x43, 0x3f, 0xa4, 0x01, 0xeb, 0x93, 0x68, 0xad, 0x82, 0x2a, 0x1a, 0x48, 0x3c, 0xd0, 0x34, 0xf7,
	0xff, 0x06, 0xc0, 0xbe, 0x88, 0xb2, 0x3e, 0x47, 0x6b, 0x6e, 0x83, 0x79, 0xce, 0x68, 0x14, 0xfa,
	0x2c, 0xcc, 0x2d, 0xaa, 0xe1, 0xbe, 0x1d, 0x3a, 0xaf, 0xc0, 0x0a, 0x89, 0x24, 0xda, 0x24, 0x15,
	0x9c, 0xe6, 0x2f, 0xef, 0x6d, 0xcd, 0xc4, 0x3f, 0x8f, 0xfc, 0x01, 0x91, 0x44, 0x59, 0xe9, 0x99,
	0x61, 0xbe, 0x72, 0x1e, 0x40, 0x93, 0xa5, 0x7e, 0x9c, 0xb0, 0x3e, 0x49, 0x86, 0xfe, 0x47, 0x3a,
	0x44, 0x9f, 0x4c, 0xaf, 0xc1, 0xd2, 0x8e, 0x26, 0xbe, 0xa1, 0x43, 0xe7, 0x2e, 0x58, 0x2c, 0xf5,
	0x49, 0x26, 0x45, 0xfb, 0x00, 0x3d, 0x32, 0x3d, 0x93, 0xa5, 0xbb, 0xb8, 0x57, 0x31, 0xe1, 0x34,
	0x95, 0x34, 0xf4, 0x63, 0x22, 0x7b, 0x6b, 0x95, 0x8d, 0x92, 0x8a, 0x89, 0x26, 0x75, 0x88, 0xec,
	0x39, 0x9b, 0x60, 0x2b, 0x1d, 0x24, 0x91, 0x4c, 0x32, 0xc1, 0x51, 0x4b, 0x15, 0x41, 0x9a, 0x2c,
	0xed, 0x8c, 0xc8, 0x6f, 0xe8, 0xd0, 0xfd, 0xdd, 0xc8, 0xe5, 0xc3, 0xcb, 0x38, 0x71, 0x9e, 0x40,
	0x99, 0xf1, 0x73, 0x81, 0xee, 0xd6, 0xaf, 0xba, 0x84, 0xb5, 0x36, 0x89, 0x8f, 0x87, 0xac, 0x0a,
	0xe0, 0xf0, 0x92, 0xa5, 0x32, 0xfd, 0xae, 0x00, 0x7b, 0x60, 0x61, 0x39, 0xa2, 0xfc, 0xaf, 0xa0,
	0x32, 0x50, 0x9b, 0x1c, 0x60, 0x7d, 0x0e, 0xc0, 0x74, 0x09, 0x7b, 0x9a, 0xdb, 0xfd, 0xbb, 0x01,
	0xcd, 0x0f, 0x9c, 0x24, 0x43, 0x8f, 0xf0, 0x0b, 0x8d, 0xf4, 0x5b, 0xa8, 0x07, 0xa8, 0xca, 0x5f,
	0xde, 0x20, 0x08, 0x26, 0xd9, 0xff, 0x39, 0x14, 0x45, 0x9c, 0xe7, 0xf6, 0xf6, 0x1c, 0xb1, 0x93,
	0x18, 0xf3, 0x5a, 0x14, 0xf1, 0xc4, 0xe8, 0xd2, 0x8d, 0x8c, 0xfe, 0x5b, 0x11, 0x56, 0xf7, 0xd8,
	0xd7, 0xb5, 0xfa, 0x21, 0xac, 0x46, 0xe2, 0x33, 0x4d, 0x7c, 0xc6, 0x83, 0x28, 0x4b, 0xd9, 0x40,
	0x97, 0xa7, 0xe9, 0x35, 0x91, 0xdc, 0x1e, 0x51, 0x15, 0x63, 0x16, 0xc7, 0x33, 0x8c, 0xba, 0x0c,
	0x9b, 0x48, 0x9e, 0x30, 0xee, 0x40, 0x5d, 0x23, 0x6a, 0x17, 0xcb, 0xcb, 0xb9, 0x08, 0x28, 0xa3,
	0xdb, 0xcc, 0x0e, 0xd4, 0xb5, 0x2a, 0x8d, 0x50, 0x59, 0x12, 0x01, 0x65, 0x70, 0xed, 0xfe, 0xdb,
	0x80, 0xfa, 0xbe, 0xe8, 0xc7, 0x24, 0xd1, 0x51, 0x3a, 0x02, 0x3b, 0xa2, 0xe7, 0xd2, 0xbf, 0x71,
	0xa8, 0x9a, 0x4a, 0x6c, 0xea, 0x8a, 0xb7, 0xe1, 0x56, 0xc2, 0x2e, 0x7a, 0xb3, 0x48, 0xc5, 0x65,
	0x90, 0x56, 0x51, 0x6e, 0xff, 0x6a, 0xbd, 0x94, 0x96, 0xa8, 0x17, 0xf7, 0x4f, 0x06, 0x98, 0xa7,
	0x34, 0xe9, 0x7f, 0x95, 0x8c, 0xbf, 0x80, 0x2a, 0xc6, 0x35, 0x5d, 0x2b, 0x6e, 0x94, 0x96, 0x09,
	0x6c, 0xce, 0xae, 0x9e, 0x03, 0x0b, 0xef, 0x0c, 0x9a, 0xf1, 0x0c, 0xcd, 0x37, 0xd0, 0xfc, 0x07,
	0x73, 0x20, 0xc6, 0x9c, 0x7a, 0x75, 0x12, 0x63, 0xe5, 0x3f, 0x86, 0x4a, 0xd0, 0x63, 0x51, 0x98,
	0xc7, 0xec, 0x47, 0x73, 0x04, 0x95, 0x8c, 0xa7, 0xb9, 0xdc, 0x75, 0xa8, 0xe5, 0xd2, 0x4e, 0x1d,
	0x6a, 0x6d, 0x3e, 0x20, 0x11, 0x0b, 0xed, 0x82, 0x53, 0x83, 0xd2, 0xb1, 0x90, 0xb6, 0xe1, 0xfe,
	0xd7, 0x00, 0xd0, 0x57, 0x02, 0x8d, 0x7a, 0x3e, 0x65, 0xd4, 0xcf, 0xe6, 0x60, 0x4f, 0x58, 0xf3,
	0x65, 0x6e, 0xd6, 0x2f, 0xa0, 0xac, 0x12, 0x7d, 0x9d, 0x55, 0xc8, 0xa4, 0x7c, 0xc0, 0x5c, 0xe6,
	0xb7, 0x77, 0xb1, 0x0f, 0xc8, 0xe5, 0x3e, 0x07, 0x73, 0xa4, 0x6b, 0xd6, 0x89, 0x26, 0xc0, 0x5b,
	0x71, 0xc1, 0x02, 0x12, 0xed, 0xf2, 0xd0, 0x36, 0x9c, 0x15, 0xb0, 0xf2, 0xfd, 0x49, 0x62, 0x17,
	0xdd, 0xff, 0x18, 0xb0, 0xa2, 0x05, 0x77, 0x13, 0x26, 0x7b, 0x27, 0xf1, 0xf7, 0xce, 0xfc, 0x4b,
	0x30, 0x89, 0x82, 0xf2, 0xc7, 0x7d, 0xea, 0xfe, 0x1c, 0xe1, 0x5c, 0x1b, 0x16, 0x5f, 0x8d, 0xe4,
	0xaa, 0x0f, 0x60, 0x45, 0xd7, 0xbd, 0x88, 0x69, 0x42, 0x78, 0xb8, 0x6c, 0xe7, 0x6a, 0xa0, 0xd4,
	0x89, 0x16, 0x72, 0xff, 0x6a, 0x8c, 0x1a, 0x18, 0x2a, 0xc1, 0x94, 0x8d, 0x42, 0x6f, 0xdc, 0x28,
	0xf4, 0xc5, 0x65, 0x42, 0xef, 0x6c, 0x4d, 0x5d, 0xb1, 0xeb, 0x5c, 0x55, 0xf7, 0xec, 0x5f, 0x45,
	0xb8, 0x33, 0x13, 0xf2, 0xc3, 0x01, 0x89, 0xbe, 0x5e, 0xaf, 0xfd, 0xa1, 0xe3, 0x9f, 0xb7, 0x9c,
	0xf2, 0x8d, 0x9e, 0xa8, 0xca, 0x8d, 0x9e, 0xa8, 0x7f, 0x56, 0xa1, 0x8c, 0xb1, 0x7a, 0x05, 0x96,
	0xa4, 0x49, 0xdf, 0xa7, 0x97, 0x71, 0x92, 0x47, 0xea, 0xee, 0x1c, 0x8c, 0x51, 0x57, 0x53, 0xb3,
	0xa0, 0x1c, 0x75, 0xb8, 0xd7, 0x00, 0x99, 0x4a, 0x82, 0x16, 0xd6, 0xa9, 0xfe, 0xf1, 0x97, 0x5a,
	0x8c, 0x9a, 0x14, 0xb3, 0x71, 0x13, 0xd8, 0x81, 0x7a, 0x97, 0x4d, 0xe4, 0x4b, 0x0b, 0xd3, 0x34,
	0xe9, 0x06, 0xad, 0x82, 0x07, 0xdd, 0x49, 0x1b, 0xd9, 0x87, 0x46, 0xa0, 0x5f, 0x0f, 0x0d, 0xa1,
	0xdf, 0xb0, 0xfb, 0x73, 0x33, 0x3d, 0x7e, 0x64, 0x5a, 0x05, 0xaf, 0x1e, 0x4c, 0xbd, 0x39, 0xef,
	0xc0, 0xd6, 0x5e, 0x24, 0xaa, 0x80, 0x34, 0x90, 0x0e, 0xe6, 0x4f, 0x16, 0xf9, 0x32, 0x2e, 0xb5,
	0x56, 0xc1, 0x6b, 0x66, 0xb3, 0x0f, 0x7d, 0x07, 0x6e, 0xe5, 0x5e, 0x4d, 0xe1, 0x55, 0x11, 0xcf,
	0x5d, 0xe8, 0xdb, 0x34, 0xe0, 0x6a, 0xf7, 0xca, 0xe8, 0x20, 0x61, 0x3d, 0x47, 0x1c, 0x55, 0xa5,
	0x4f, 0x07, 0x24, 0x9a, 0xc6, 0xaf, 0x21, 0xfe, 0xe3, 0x85, 0xf8, 0xf3, 0xae, 0x49, 0xab, 0xe0,
	0xdd, 0xe9, 0x2e, 0xbe, 0x44, 0x13, 0x3f, 0xb4, 0x56, 0xd4, 0x63, 0x5e, 0xe3, 0xc7, 0xb8, 0x5d,
	0x4c, 0xfc, 0x98, 0x74, 0x90, 0xd7, 0x00, 0x58, 0x7c, 0x1a, 0xca, 0x5a, 0x58, 0x2e, 0xe3, 0xa1,
	0x51, 0x95, 0xcb, 0x60, 0x3c, 0x41, 0xee, 0x8c, 0x6f, 0x35, 0xca, 0xc3, 0x35, 0xb7, 0x7a, 0x54,
	0x2e, 0xc1, 0x64, 0x08, 0xde, 0x81, 0x3a, 0xc5, 0x89, 0x56, 0x23, 0xd4, 0x17, 0x22, 0x4c, 0xe6,
	0x5e, 0x85, 0x40, 0xc7, 0xbb, 0xbd, 0x2a, 0x94, 0x95, 0xa8, 0xfb, 0x3f, 0x03, 0xe0, 0x8c, 0x06,
	0x52, 0x24, 0xbb, 0xc7, 0xc7, 0xef, 0xf3, 0x99, 0x5e, 0xfb, 0xab, 0x3f, 0x5c, 0x6a, 0xa6, 0xd7,
	0x21, 0x99, 0xf9, 0x6d, 0x14, 0x67, 0x7f, 0x1b, 0x2f, 0x00, 0xe2, 0x84, 0x86, 0x2c, 0x20, 0x92,
	0xa6, 0xd7, 0x3d, 0x53, 0x53, 0xac, 0xce, 0x6f, 0x00, 0x3e, 0xa9, 0xcf, 0x95, 0x6e, 0x70, 0xe5,
	0x85, 0xa1, 0x1c, 0xff, 0xc0, 0x3c, 0xeb, 0xd3, 0xf8, 0x33, 0xf6, 0x10, 0x56, 0xe3, 0x88, 0x04,
	0xb4, 0x27, 0xa2, 0x90, 0x26, 0xbe, 0x24, 0x17, 0x58, 0xef, 0x96, 0xd7, 0x9c, 0x22, 0x9f, 0x92,
	0x0b, 0x37, 0x80, 0x15, 0x04, 0xe8, 0x44, 0x84, 0x1f, 0x8b, 0x90, 0x5e, 0xb1, 0xd7, 0x58, 0xde,
	0xde, 0xdb, 0x60, 0xb2, 0xd4, 0x0f, 0x44, 0xc6, 0x65, 0x3e, 0xb6, 0xd6, 0x58, 0xba, 0xaf, 0xb6,
	0xee, 0x37, 0x06, 0x98, 0x63, 0x05, 0x3b, 0x50, 0x1f, 0x60, 0x58, 0x7d, 0xc2, 0x79, 0xfa, 0x85,
	0xce, 0x3d, 0x09, 0xbe, 0xca, 0x90, 0x96, 0xd9, 0xe5, 0x3c, 0x75, 0x5e, 0xce, 0x98, 0xf8, 0xe5,
	0xe7, 0x47, 0x89, 0x4e, 0x19, 0xf9, 0x6b, 0xa8, 0x60, 0x90, 0xf2, 0x78, 0x6e, 0x2c, 0x8a, 0xe7,
	0xc8, 0xda, 0x56, 0xc1, 0xd3, 0x02, 0xea, 0x57, 0x26, 0x32, 0x19, 0x67, 0xd2, 0x1f, 0x65, 0x5a,
	0x65, 0xb3, 0xb4, 0x59, 0xf2, 0x9a, 0x9a, 0xfe, 0x7b, 0x9d, 0xf0, 0x54, 0x15, 0x10, 0x17, 0x21,
	0x7d, 0xf4, 0x0f, 0x03, 0xaa, 0xba, 0x8b, 0xcf, 0xce, 0x1a, 0xab, 0x50, 0x3f, 0x4a, 0x28, 0x91,
	0x34, 0x39, 0xed, 0x11, 0x6e, 0x1b, 0x8e, 0x0d, 0x8d, 0x9c, 0x70, 0xf8, 0x29, 0x23, 0x91, 0x5d,
	0x74, 0x1a, 0x60, 0xbe, 0xa5, 0x69, 0x8a, 0xe7, 0x25, 0x1c, 0x46, 0x68, 0x9a, 0xea, 0xc3, 0xb2,
	0x63, 0x41, 0x45, 0x2f, 0x2b, 0x8a, 0xef, 0x58, 0x48, 0xbd, 0xab, 0x2a, 0xe0, 0x4e, 0x42, 0xcf,
	0xd9, 0xe5, 0x3b, 0x22, 0x83, 0x9e, 0x5d, 0x53, 0xc0, 0x1d, 0x91, 0xca, 0x31, 0xc5, 0x54, 0xb2,
	0x7a, 0x69, 0xa9, 0x25, 0x76, 0x02, 0x1b, 0x9c, 0x2a, 0x14, 0xdb, 0xdc, 0xae, 0x2b, 0xd2, 0xb1,
	0x90, 0x6d, 0x6e, 0x37, 0x1e, 0x1d, 0x41, 0x7d, 0xea, 0xf1, 0x53, 0x0e, 0x7c, 0xe0, 0x1f, 0xb9,
	0xf8, 0xcc, 0xf5, 0xc4, 0xb7, 0x1b, 0xaa, 0x29, 0xa9, 0x06, 0xa5, 0xf7, 0x59, 0xd7, 0x2e, 0xaa,
	0xc5, 0xbb, 0x2c, 0xb2, 0x4b, 0x6a, 0x71, 0xc0, 0x06, 0x76, 0x19, 0x29, 0x22, 0xb4, 0x2b, 0x7b,
	0x4f, 0xff, 0xf8, 0xe4, 0x82, 0xc9, 0x5e, 0xd6, 0xdd, 0x0a, 0x44, 0x7f, 0x5b, 0x87, 0xfb, 0x31,
	0x13, 0xf9, 0x6a, 0x9b, 0x71, 0x49, 0x13, 0x4e, 0xa2, 0x6d, 0xcc, 0xc0, 0xb6, 0xca, 0x40, 0xdc,
	0xed, 0x56, 0x71, 0xf7, 0xf4, 0xdb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x9a, 0xbc, 0xa6, 0x23,
	0x11, 0x00, 0x00,
}

114 internal/proxy/expr_checker.go (Normal file)
@@ -0,0 +1,114 @@
package proxy

import (
	"github.com/cockroachdb/errors"

	"github.com/milvus-io/milvus/internal/proto/planpb"
)

func ParseExprFromPlan(plan *planpb.PlanNode) (*planpb.Expr, error) {
	node := plan.GetNode()

	if node == nil {
		return nil, errors.New("can't get expr from empty plan node")
	}

	var expr *planpb.Expr
	switch node := node.(type) {
	case *planpb.PlanNode_VectorAnns:
		expr = node.VectorAnns.GetPredicates()
	case *planpb.PlanNode_Query:
		expr = node.Query.GetPredicates()
	default:
		return nil, errors.New("unsupported plan node type")
	}

	return expr, nil
}

func ParsePartitionKeysFromBinaryExpr(expr *planpb.BinaryExpr) ([]*planpb.GenericValue, bool) {
	leftRes, leftInRange := ParsePartitionKeysFromExpr(expr.Left)
	RightRes, rightInRange := ParsePartitionKeysFromExpr(expr.Right)

	if expr.Op == planpb.BinaryExpr_LogicalAnd {
		// case: partition_key_field in [7, 8] && partition_key > 8
		if len(leftRes)+len(RightRes) > 0 {
			leftRes = append(leftRes, RightRes...)
			return leftRes, false
		}

		// case: other_field > 10 && partition_key_field > 8
		return nil, leftInRange || rightInRange
	}

	if expr.Op == planpb.BinaryExpr_LogicalOr {
		// case: partition_key_field in [7, 8] or partition_key > 8
		if leftInRange || rightInRange {
			return nil, true
		}

		// case: partition_key_field in [7, 8] or other_field > 10
		leftRes = append(leftRes, RightRes...)
		return leftRes, false
	}

	return nil, false
}

func ParsePartitionKeysFromUnaryExpr(expr *planpb.UnaryExpr) ([]*planpb.GenericValue, bool) {
	res, partitionInRange := ParsePartitionKeysFromExpr(expr.GetChild())
	if expr.Op == planpb.UnaryExpr_Not {
		// case: partition_key_field not in [7, 8]
		if len(res) != 0 {
			return nil, true
		}

		// case: other_field not in [10]
		return nil, partitionInRange
	}

	// UnaryOp only includes "Not" for now
	return res, partitionInRange
}

func ParsePartitionKeysFromTermExpr(expr *planpb.TermExpr) ([]*planpb.GenericValue, bool) {
	if expr.GetColumnInfo().GetIsPartitionKey() {
		return expr.GetValues(), false
	}

	return nil, false
}

func ParsePartitionKeysFromUnaryRangeExpr(expr *planpb.UnaryRangeExpr) ([]*planpb.GenericValue, bool) {
	if expr.GetColumnInfo().GetIsPartitionKey() && expr.GetOp() == planpb.OpType_Equal {
		return []*planpb.GenericValue{expr.Value}, false
	}

	return nil, true
}

func ParsePartitionKeysFromExpr(expr *planpb.Expr) ([]*planpb.GenericValue, bool) {
	var res []*planpb.GenericValue
	partitionKeyInRange := false
	switch expr := expr.GetExpr().(type) {
	case *planpb.Expr_BinaryExpr:
		res, partitionKeyInRange = ParsePartitionKeysFromBinaryExpr(expr.BinaryExpr)
	case *planpb.Expr_UnaryExpr:
		res, partitionKeyInRange = ParsePartitionKeysFromUnaryExpr(expr.UnaryExpr)
	case *planpb.Expr_TermExpr:
		res, partitionKeyInRange = ParsePartitionKeysFromTermExpr(expr.TermExpr)
	case *planpb.Expr_UnaryRangeExpr:
		res, partitionKeyInRange = ParsePartitionKeysFromUnaryRangeExpr(expr.UnaryRangeExpr)
	}

	return res, partitionKeyInRange
}

func ParsePartitionKeys(expr *planpb.Expr) []*planpb.GenericValue {
	res, partitionKeyInRange := ParsePartitionKeysFromExpr(expr)
	if partitionKeyInRange {
		res = nil
	}

	return res
}
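Taken together, these helpers let the proxy turn an equality or IN predicate on the partition-key field into a concrete set of key values, and to bail out (returning nil) when the predicate only ranges over the key, e.g. "partition_key_field > 9". A minimal usage sketch in the same package, reusing CreateRetrievePlan the way the test file below does; partitionsForFilter is an illustrative wrapper, not part of this commit:

package proxy

import (
	"github.com/milvus-io/milvus-proto/go-api/schemapb"

	"github.com/milvus-io/milvus/internal/parser/planparserv2"
	"github.com/milvus-io/milvus/internal/proto/planpb"
)

// partitionsForFilter returns the partition-key values a filter pins down,
// or nil when the query has to fan out to every partition.
func partitionsForFilter(schema *schemapb.CollectionSchema, filter string) ([]*planpb.GenericValue, error) {
	plan, err := planparserv2.CreateRetrievePlan(schema, filter)
	if err != nil {
		return nil, err
	}
	expr, err := ParseExprFromPlan(plan)
	if err != nil {
		return nil, err
	}
	// A nil result means the predicate only ranges over the key,
	// so no pruning is possible.
	return ParsePartitionKeys(expr), nil
}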
138 internal/proxy/expr_checker_test.go (Normal file)
@@ -0,0 +1,138 @@
package proxy

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus-proto/go-api/schemapb"
	"github.com/milvus-io/milvus/internal/parser/planparserv2"
	"github.com/milvus-io/milvus/internal/proto/planpb"
	"github.com/milvus-io/milvus/pkg/common"
	"github.com/milvus-io/milvus/pkg/util/funcutil"
)

func TestParsePartitionKeys(t *testing.T) {
	prefix := "TestParsePartitionKeys"
	collectionName := prefix + funcutil.GenRandomStr()

	fieldName2Type := make(map[string]schemapb.DataType)
	fieldName2Type["int64_field"] = schemapb.DataType_Int64
	fieldName2Type["varChar_field"] = schemapb.DataType_VarChar
	fieldName2Type["fvec_field"] = schemapb.DataType_FloatVector
	schema := constructCollectionSchemaByDataType(collectionName, fieldName2Type, "int64_field", false)
	partitionKeyField := &schemapb.FieldSchema{
		Name:           "partition_key_field",
		DataType:       schemapb.DataType_Int64,
		IsPartitionKey: true,
	}
	schema.Fields = append(schema.Fields, partitionKeyField)
	fieldID := common.StartOfUserFieldID
	for _, field := range schema.Fields {
		field.FieldID = int64(fieldID)
		fieldID++
	}

	queryInfo := &planpb.QueryInfo{
		Topk:         10,
		MetricType:   "L2",
		SearchParams: "",
		RoundDecimal: -1,
	}

	type testCase struct {
		name                 string
		expr                 string
		expected             int
		validPartitionKeys   []int64
		invalidPartitionKeys []int64
	}
	cases := []testCase{
		{
			name:                 "binary_expr_and with term",
			expr:                 "partition_key_field in [7, 8] && int64_field >= 10",
			expected:             2,
			validPartitionKeys:   []int64{7, 8},
			invalidPartitionKeys: []int64{},
		},
		{
			name:                 "binary_expr_and with equal",
			expr:                 "partition_key_field == 7 && int64_field >= 10",
			expected:             1,
			validPartitionKeys:   []int64{7},
			invalidPartitionKeys: []int64{},
		},
		{
			name:                 "binary_expr_and with term2",
			expr:                 "partition_key_field in [7, 8] && int64_field == 10",
			expected:             2,
			validPartitionKeys:   []int64{7, 8},
			invalidPartitionKeys: []int64{10},
		},
		{
			name:                 "binary_expr_and with partition key in range",
			expr:                 "partition_key_field in [7, 8] && partition_key_field > 9",
			expected:             2,
			validPartitionKeys:   []int64{7, 8},
			invalidPartitionKeys: []int64{9},
		},
		{
			name:                 "binary_expr_and with partition key in range2",
			expr:                 "int64_field == 10 && partition_key_field > 9",
			expected:             0,
			validPartitionKeys:   []int64{},
			invalidPartitionKeys: []int64{},
		},
		{
			name:                 "binary_expr_and with term and not",
			expr:                 "partition_key_field in [7, 8] && partition_key_field not in [10, 20]",
			expected:             2,
			validPartitionKeys:   []int64{7, 8},
			invalidPartitionKeys: []int64{10, 20},
		},
		{
			name:                 "binary_expr_or with term and not",
			expr:                 "partition_key_field in [7, 8] or partition_key_field not in [10, 20]",
			expected:             0,
			validPartitionKeys:   []int64{},
			invalidPartitionKeys: []int64{},
		},
		{
			name:                 "binary_expr_or with term and not 2",
			expr:                 "partition_key_field in [7, 8] or int64_field not in [10, 20]",
			expected:             2,
			validPartitionKeys:   []int64{7, 8},
			invalidPartitionKeys: []int64{10, 20},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// test search plan
			searchPlan, err := planparserv2.CreateSearchPlan(schema, tc.expr, "fvec_field", queryInfo)
			assert.NoError(t, err)
			expr, err := ParseExprFromPlan(searchPlan)
			assert.NoError(t, err)
			partitionKeys := ParsePartitionKeys(expr)
			assert.Equal(t, tc.expected, len(partitionKeys))
			for _, key := range partitionKeys {
				int64Val := key.Val.(*planpb.GenericValue_Int64Val).Int64Val
				assert.Contains(t, tc.validPartitionKeys, int64Val)
				assert.NotContains(t, tc.invalidPartitionKeys, int64Val)
			}

			// test query plan
			queryPlan, err := planparserv2.CreateRetrievePlan(schema, tc.expr)
			assert.NoError(t, err)
			expr, err = ParseExprFromPlan(queryPlan)
			assert.NoError(t, err)
			partitionKeys = ParsePartitionKeys(expr)
			assert.Equal(t, tc.expected, len(partitionKeys))
			for _, key := range partitionKeys {
				int64Val := key.Val.(*planpb.GenericValue_Int64Val).Int64Val
				assert.Contains(t, tc.validPartitionKeys, int64Val)
				assert.NotContains(t, tc.invalidPartitionKeys, int64Val)
			}
		})
	}
}
@ -911,11 +911,11 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
|
||||
zap.String("collection", request.CollectionName),
|
||||
zap.String("partition", request.PartitionName))
|
||||
|
||||
log.Debug(rpcReceived("CreatePartition"))
|
||||
log.Debug(rpcReceived(method))
|
||||
|
||||
if err := node.sched.ddQueue.Enqueue(cpt); err != nil {
|
||||
log.Warn(
|
||||
rpcFailedToEnqueue("CreatePartition"),
|
||||
rpcFailedToEnqueue(method),
|
||||
zap.Error(err))
|
||||
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.AbandonLabel).Inc()
|
||||
@ -927,13 +927,13 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
rpcEnqueued("CreatePartition"),
|
||||
rpcEnqueued(method),
|
||||
zap.Uint64("BeginTS", cpt.BeginTs()),
|
||||
zap.Uint64("EndTS", cpt.EndTs()))
|
||||
|
||||
if err := cpt.WaitToFinish(); err != nil {
|
||||
log.Warn(
|
||||
rpcFailedToWaitToFinish("CreatePartition"),
|
||||
rpcFailedToWaitToFinish(method),
|
||||
zap.Error(err),
|
||||
zap.Uint64("BeginTS", cpt.BeginTs()),
|
||||
zap.Uint64("EndTS", cpt.EndTs()))
|
||||
@ -947,7 +947,7 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
rpcDone("CreatePartition"),
|
||||
rpcDone(method),
|
||||
zap.Uint64("BeginTS", cpt.BeginTs()),
|
||||
zap.Uint64("EndTS", cpt.EndTs()))
|
||||
|
||||
@ -2028,7 +2028,7 @@ func (node *Proxy) GetIndexState(ctx context.Context, request *milvuspb.GetIndex
|
||||
}, nil
|
||||
}
|
||||
|
||||
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-Insert")
|
||||
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-GetIndexState")
|
||||
defer sp.End()
|
||||
|
||||
dipt := &getIndexStateTask{
|
||||
@ -2145,10 +2145,6 @@ func (node *Proxy) Insert(ctx context.Context, request *milvuspb.InsertRequest)
|
||||
chTicker: node.chTicker,
|
||||
}
|
||||
|
||||
if len(it.insertMsg.PartitionName) <= 0 {
|
||||
it.insertMsg.PartitionName = Params.CommonCfg.DefaultPartitionName.GetValue()
|
||||
}
|
||||
|
||||
constructFailedResponse := func(err error) *milvuspb.MutationResult {
|
||||
numRows := request.NumRows
|
||||
errIndex := make([]uint32, numRows)
|
||||
@ -2383,10 +2379,6 @@ func (node *Proxy) Upsert(ctx context.Context, request *milvuspb.UpsertRequest)
|
||||
chTicker: node.chTicker,
|
||||
}
|
||||
|
||||
if len(it.req.PartitionName) <= 0 {
|
||||
it.req.PartitionName = Params.CommonCfg.DefaultPartitionName.GetValue()
|
||||
}
|
||||
|
||||
constructFailedResponse := func(err error, errCode commonpb.ErrorCode) *milvuspb.MutationResult {
|
||||
numRows := request.NumRows
|
||||
errIndex := make([]uint32, numRows)

@ -341,7 +341,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
    }
    switch dataType {
    case schemapb.DataType_Bool:
        fieldData.FieldName = testBoolField
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Scalars{
            Scalars: &schemapb.ScalarField{
                Data: &schemapb.ScalarField_BoolData{
@ -352,7 +352,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_Int32:
        fieldData.FieldName = testInt32Field
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Scalars{
            Scalars: &schemapb.ScalarField{
                Data: &schemapb.ScalarField_IntData{
@ -363,7 +363,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_Int64:
        fieldData.FieldName = testInt64Field
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Scalars{
            Scalars: &schemapb.ScalarField{
                Data: &schemapb.ScalarField_LongData{
@ -374,7 +374,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_Float:
        fieldData.FieldName = testFloatField
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Scalars{
            Scalars: &schemapb.ScalarField{
                Data: &schemapb.ScalarField_FloatData{
@ -385,7 +385,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_Double:
        fieldData.FieldName = testDoubleField
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Scalars{
            Scalars: &schemapb.ScalarField{
                Data: &schemapb.ScalarField_DoubleData{
@ -396,7 +396,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_VarChar:
        fieldData.FieldName = testVarCharField
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Scalars{
            Scalars: &schemapb.ScalarField{
                Data: &schemapb.ScalarField_StringData{
@ -407,7 +407,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_FloatVector:
        fieldData.FieldName = testFloatVecField
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Vectors{
            Vectors: &schemapb.VectorField{
                Dim: int64(testVecDim),
@ -419,7 +419,7 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, numRows int
            },
        }
    case schemapb.DataType_BinaryVector:
        fieldData.FieldName = testBinaryVecField
        fieldData.FieldName = fieldName
        fieldData.Field = &schemapb.FieldData_Vectors{
            Vectors: &schemapb.VectorField{
                Dim: int64(testVecDim),

@ -18,14 +18,16 @@ package proxy

import (
    "context"
    "sync"

    "github.com/milvus-io/milvus/internal/allocator"
    "go.uber.org/zap"
    "golang.org/x/sync/errgroup"

    "github.com/milvus-io/milvus-proto/go-api/commonpb"
    "github.com/milvus-io/milvus-proto/go-api/milvuspb"
    "github.com/milvus-io/milvus-proto/go-api/msgpb"
    "github.com/milvus-io/milvus-proto/go-api/schemapb"
    "github.com/milvus-io/milvus/internal/allocator"
    "github.com/milvus-io/milvus/pkg/log"
    "github.com/milvus-io/milvus/pkg/mq/msgstream"
    "github.com/milvus-io/milvus/pkg/util/commonpbutil"
@ -33,76 +35,27 @@ import (
    "github.com/milvus-io/milvus/pkg/util/typeutil"
)

func assignSegmentID(ctx context.Context, insertMsg *msgstream.InsertMsg, result *milvuspb.MutationResult, channelNames []string, idAllocator *allocator.IDAllocator, segIDAssigner *segIDAssigner) (*msgstream.MsgPack, error) {
func genInsertMsgsByPartition(ctx context.Context,
    segmentID UniqueID,
    partitionID UniqueID,
    partitionName string,
    rowOffsets []int,
    channelName string,
    insertMsg *msgstream.InsertMsg) ([]msgstream.TsMsg, error) {
    threshold := Params.PulsarCfg.MaxMessageSize.GetAsInt()
    log.Debug("assign segmentid", zap.Int("threshold", threshold))

    msgPack := &msgstream.MsgPack{
        BeginTs: insertMsg.BeginTs(),
        EndTs:   insertMsg.EndTs(),
    }

    // generate hash value for every primary key
    if len(insertMsg.HashValues) != 0 {
        log.Warn("the hashvalues passed through client is not supported now, and will be overwritten")
    }
    insertMsg.HashValues = typeutil.HashPK2Channels(result.IDs, channelNames)
    // groupedHashKeys represents the dmChannel index
    channel2RowOffsets := make(map[string][]int)  // channelName to count
    channelMaxTSMap := make(map[string]Timestamp) // channelName to max Timestamp

    // assert len(it.hashValues) < maxInt
    for offset, channelID := range insertMsg.HashValues {
        channelName := channelNames[channelID]
        if _, ok := channel2RowOffsets[channelName]; !ok {
            channel2RowOffsets[channelName] = []int{}
        }
        channel2RowOffsets[channelName] = append(channel2RowOffsets[channelName], offset)

        if _, ok := channelMaxTSMap[channelName]; !ok {
            channelMaxTSMap[channelName] = typeutil.ZeroTimestamp
        }
        ts := insertMsg.Timestamps[offset]
        if channelMaxTSMap[channelName] < ts {
            channelMaxTSMap[channelName] = ts
        }
    }

    // pre-alloc msg id by batch
    var idBegin, idEnd int64
    var err error

    // fetch next id, if no id available, fetch next batch
    // lazy fetch, get first batch after first getMsgID called
    getMsgID := func() (int64, error) {
        if idBegin == idEnd {
            err = retry.Do(ctx, func() error {
                idBegin, idEnd, err = idAllocator.Alloc(16)
                return err
            })
            if err != nil {
                log.Error("failed to allocate msg id", zap.Int64("base.MsgID", insertMsg.Base.MsgID), zap.Error(err))
                return 0, err
            }
        }
        result := idBegin
        idBegin++
        return result, nil
    }

    // create empty insert message
    createInsertMsg := func(segmentID UniqueID, channelName string, msgID int64) *msgstream.InsertMsg {
    createInsertMsg := func(segmentID UniqueID, channelName string) *msgstream.InsertMsg {
        insertReq := msgpb.InsertRequest{
            Base: commonpbutil.NewMsgBase(
                commonpbutil.WithMsgType(commonpb.MsgType_Insert),
                commonpbutil.WithMsgID(msgID),
                commonpbutil.WithTimeStamp(insertMsg.BeginTimestamp), // entity's timestamp was set to equal it.BeginTimestamp in preExecute()
                commonpbutil.WithSourceID(insertMsg.Base.SourceID),
            ),
            CollectionID:   insertMsg.CollectionID,
            PartitionID:    insertMsg.PartitionID,
            PartitionID:    partitionID,
            CollectionName: insertMsg.CollectionName,
            PartitionName:  insertMsg.PartitionName,
            PartitionName:  partitionName,
            SegmentID:      segmentID,
            ShardName:      channelName,
            Version:        msgpb.InsertDataVersion_ColumnBased,
@ -119,15 +72,9 @@ func assignSegmentID(ctx context.Context, insertMsg *msgstream.InsertMsg, result
        return msg
    }

    // repack the row data corresponding to the offset to insertMsg
    getInsertMsgsBySegmentID := func(segmentID UniqueID, rowOffsets []int, channelName string, maxMessageSize int) ([]msgstream.TsMsg, error) {
    repackedMsgs := make([]msgstream.TsMsg, 0)
    requestSize := 0
    msgID, err := getMsgID()
    if err != nil {
        return nil, err
    }
    msg := createInsertMsg(segmentID, channelName, msgID)
    msg := createInsertMsg(segmentID, channelName)
    for _, offset := range rowOffsets {
        curRowMessageSize, err := typeutil.EstimateEntitySize(insertMsg.GetFieldsData(), offset)
        if err != nil {
@ -135,13 +82,9 @@ func assignSegmentID(ctx context.Context, insertMsg *msgstream.InsertMsg, result
        }

        // if insertMsg's size is greater than the threshold, split into multiple insertMsgs
        if requestSize+curRowMessageSize >= maxMessageSize {
        if requestSize+curRowMessageSize >= threshold {
            repackedMsgs = append(repackedMsgs, msg)
            msgID, err = getMsgID()
            if err != nil {
                return nil, err
            }
            msg = createInsertMsg(segmentID, channelName, msgID)
            msg = createInsertMsg(segmentID, channelName)
            requestSize = 0
        }

@ -155,13 +98,33 @@ func assignSegmentID(ctx context.Context, insertMsg *msgstream.InsertMsg, result
    repackedMsgs = append(repackedMsgs, msg)

    return repackedMsgs, nil
}
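
genInsertMsgsByPartition keeps the old size-splitting rule: rows bound for one segment are packed greedily into an InsertMsg until the estimated payload would reach Pulsar's max message size, then a fresh message is started. A self-contained sketch of that batching rule, with sizeOf standing in for typeutil.EstimateEntitySize; the guard against flushing an empty batch is an addition for clarity:

package main

import "fmt"

// batchBySize groups row offsets greedily so that each batch's estimated
// size stays below threshold, mirroring the loop in genInsertMsgsByPartition.
func batchBySize(offsets []int, sizeOf func(int) int, threshold int) [][]int {
    var batches [][]int
    var cur []int
    curSize := 0
    for _, off := range offsets {
        if len(cur) > 0 && curSize+sizeOf(off) >= threshold {
            batches = append(batches, cur) // flush the full batch, start a new one
            cur, curSize = nil, 0
        }
        cur = append(cur, off)
        curSize += sizeOf(off)
    }
    return append(batches, cur)
}

func main() {
    rowSize := func(int) int { return 40 }                       // every row estimated at 40 bytes
    fmt.Println(batchBySize([]int{0, 1, 2, 3, 4}, rowSize, 100)) // [[0 1] [2 3] [4]]
}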

func repackInsertDataByPartition(ctx context.Context,
    partitionName string,
    rowOffsets []int,
    channelName string,
    insertMsg *msgstream.InsertMsg,
    segIDAssigner *segIDAssigner) ([]msgstream.TsMsg, error) {
    res := make([]msgstream.TsMsg, 0)

    maxTs := Timestamp(0)
    for _, offset := range rowOffsets {
        ts := insertMsg.Timestamps[offset]
        if maxTs < ts {
            maxTs = ts
        }
    }

    // get allocated segmentID info for every dmChannel and repack insertMsgs for every segmentID
    for channelName, rowOffsets := range channel2RowOffsets {
        assignedSegmentInfos, err := segIDAssigner.GetSegmentID(insertMsg.CollectionID, insertMsg.PartitionID, channelName, uint32(len(rowOffsets)), channelMaxTSMap[channelName])
    partitionID, err := globalMetaCache.GetPartitionID(ctx, insertMsg.CollectionName, partitionName)
    if err != nil {
        log.Error("allocate segmentID for insert data failed", zap.Int64("collectionID", insertMsg.CollectionID), zap.String("channel name", channelName),
        return nil, err
    }
    assignedSegmentInfos, err := segIDAssigner.GetSegmentID(insertMsg.CollectionID, partitionID, channelName, uint32(len(rowOffsets)), maxTs)
    if err != nil {
        log.Error("allocate segmentID for insert data failed",
            zap.String("collection name", insertMsg.CollectionName),
            zap.String("channel name", channelName),
            zap.Int("allocate count", len(rowOffsets)),
            zap.Error(err))
        return nil, err
@ -170,15 +133,157 @@ func assignSegmentID(ctx context.Context, insertMsg *msgstream.InsertMsg, result
    startPos := 0
    for segmentID, count := range assignedSegmentInfos {
        subRowOffsets := rowOffsets[startPos : startPos+int(count)]
        insertMsgs, err := getInsertMsgsBySegmentID(segmentID, subRowOffsets, channelName, threshold)
        msgs, err := genInsertMsgsByPartition(ctx, segmentID, partitionID, partitionName, subRowOffsets, channelName, insertMsg)
        if err != nil {
            log.Error("repack insert data to insert msgs failed", zap.Int64("collectionID", insertMsg.CollectionID),
            log.Warn("repack insert data to insert msgs failed",
                zap.String("collection name", insertMsg.CollectionName),
                zap.Int64("partitionID", partitionID),
                zap.Error(err))
            return nil, err
        }
        msgPack.Msgs = append(msgPack.Msgs, insertMsgs...)
        res = append(res, msgs...)
        startPos += int(count)
    }

    return res, nil
}

func setMsgID(ctx context.Context,
    msgs []msgstream.TsMsg,
    idAllocator *allocator.IDAllocator) error {
    var idBegin int64
    var err error

    err = retry.Do(ctx, func() error {
        idBegin, _, err = idAllocator.Alloc(uint32(len(msgs)))
        return err
    })
    if err != nil {
        log.Error("failed to allocate msg id", zap.Error(err))
        return err
    }

    for i, msg := range msgs {
        msg.SetID(idBegin + UniqueID(i))
    }

    return nil
}

func repackInsertData(ctx context.Context,
    channelNames []string,
    insertMsg *msgstream.InsertMsg,
    result *milvuspb.MutationResult,
    idAllocator *allocator.IDAllocator,
    segIDAssigner *segIDAssigner) (*msgstream.MsgPack, error) {
    msgPack := &msgstream.MsgPack{
        BeginTs: insertMsg.BeginTs(),
        EndTs:   insertMsg.EndTs(),
    }

    channel2RowOffsets := assignChannelsByPK(result.IDs, channelNames, insertMsg)
    for channel, rowOffsets := range channel2RowOffsets {
        partitionName := insertMsg.PartitionName
        msgs, err := repackInsertDataByPartition(ctx, partitionName, rowOffsets, channel, insertMsg, segIDAssigner)
        if err != nil {
            log.Warn("repack insert data to msg pack failed",
                zap.String("collection name", insertMsg.CollectionName),
                zap.String("partition name", partitionName),
                zap.Error(err))
            return nil, err
        }

        msgPack.Msgs = append(msgPack.Msgs, msgs...)
    }

    err := setMsgID(ctx, msgPack.Msgs, idAllocator)
    if err != nil {
        log.Error("failed to set msgID when repack insert data",
            zap.String("collection name", insertMsg.CollectionName),
            zap.String("partition name", insertMsg.PartitionName),
            zap.Error(err))
        return nil, err
    }

    return msgPack, nil
}
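
That completes the decomposition of the old assignSegmentID. For orientation, a compressed view of the new non-partition-key flow; every name below is taken from this diff, and the indentation shows call structure, not code:

// repackInsertData
//   assignChannelsByPK            // hash primary keys: channel -> row offsets
//   repackInsertDataByPartition   // partition name -> partitionID, segment assignment via segIDAssigner
//     genInsertMsgsByPartition    // per segment: size-bounded InsertMsgs (threshold = Pulsar max message size)
//   setMsgID                      // one batched msgID allocation for the whole MsgPack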

func repackInsertDataWithPartitionKey(ctx context.Context,
    channelNames []string,
    partitionKeys *schemapb.FieldData,
    insertMsg *msgstream.InsertMsg,
    result *milvuspb.MutationResult,
    idAllocator *allocator.IDAllocator,
    segIDAssigner *segIDAssigner) (*msgstream.MsgPack, error) {
    msgPack := &msgstream.MsgPack{
        BeginTs: insertMsg.BeginTs(),
        EndTs:   insertMsg.EndTs(),
    }

    channel2RowOffsets := assignChannelsByPK(result.IDs, channelNames, insertMsg)
    partitionNames, err := getDefaultPartitionNames(ctx, insertMsg.CollectionName)
    if err != nil {
        log.Warn("get default partition names failed in partition key mode",
            zap.String("collection name", insertMsg.CollectionName),
            zap.Error(err))
        return nil, err
    }
    hashValues, err := typeutil.HashKey2Partitions(partitionKeys, partitionNames)
    if err != nil {
        log.Warn("hash partition keys to partitions failed",
            zap.String("collection name", insertMsg.CollectionName),
            zap.Error(err))
        return nil, err
    }

    for channel, rowOffsets := range channel2RowOffsets {
        partition2RowOffsets := make(map[string][]int)
        for _, idx := range rowOffsets {
            partitionName := partitionNames[hashValues[idx]]
            if _, ok := partition2RowOffsets[partitionName]; !ok {
                partition2RowOffsets[partitionName] = []int{}
            }
            partition2RowOffsets[partitionName] = append(partition2RowOffsets[partitionName], idx)
        }

        errGroup, _ := errgroup.WithContext(ctx)
        partition2Msgs := sync.Map{}
        for partitionName, offsets := range partition2RowOffsets {
            partitionName := partitionName
            offsets := offsets
            errGroup.Go(func() error {
                msgs, err := repackInsertDataByPartition(ctx, partitionName, offsets, channel, insertMsg, segIDAssigner)
                if err != nil {
                    return err
                }

                partition2Msgs.Store(partitionName, msgs)
                return nil
            })
        }

        err = errGroup.Wait()
        if err != nil {
            log.Warn("repack insert data into insert msg pack failed",
                zap.String("collection name", insertMsg.CollectionName),
                zap.String("channel name", channel),
                zap.Error(err))
            return nil, err
        }

        partition2Msgs.Range(func(k, v interface{}) bool {
            msgs := v.([]msgstream.TsMsg)
            msgPack.Msgs = append(msgPack.Msgs, msgs...)
            return true
        })
    }

    err = setMsgID(ctx, msgPack.Msgs, idAllocator)
    if err != nil {
        log.Error("failed to set msgID when repack insert data",
            zap.String("collection name", insertMsg.CollectionName),
            zap.Error(err))
        return nil, err
    }

    return msgPack, nil
231
internal/proxy/msg_pack_test.go
Normal file
@ -0,0 +1,231 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proxy

import (
    "context"
    "testing"

    "github.com/golang/protobuf/proto"
    "github.com/stretchr/testify/assert"

    "github.com/milvus-io/milvus-proto/go-api/commonpb"
    "github.com/milvus-io/milvus-proto/go-api/milvuspb"
    "github.com/milvus-io/milvus-proto/go-api/msgpb"
    "github.com/milvus-io/milvus-proto/go-api/schemapb"
    "github.com/milvus-io/milvus/internal/allocator"
    "github.com/milvus-io/milvus/pkg/mq/msgstream"
    "github.com/milvus-io/milvus/pkg/util/funcutil"
    "github.com/milvus-io/milvus/pkg/util/paramtable"
)

func TestRepackInsertData(t *testing.T) {
    nb := 10
    hash := generateHashKeys(nb)
    prefix := "TestRepackInsertData"
    dbName := ""
    collectionName := prefix + funcutil.GenRandomStr()
    partitionName := prefix + funcutil.GenRandomStr()

    ctx := context.Background()

    rc := NewRootCoordMock()
    rc.Start()
    defer rc.Stop()

    err := InitMetaCache(ctx, rc, nil, nil)
    assert.NoError(t, err)

    idAllocator, err := allocator.NewIDAllocator(ctx, rc, paramtable.GetNodeID())
    assert.NoError(t, err)
    _ = idAllocator.Start()
    defer idAllocator.Close()

    t.Run("create collection", func(t *testing.T) {
        resp, err := rc.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
            Base:           nil,
            DbName:         dbName,
            CollectionName: collectionName,
        })
        assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode())
        assert.NoError(t, err)

        resp, err = rc.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{
            Base: &commonpb.MsgBase{
                MsgType:   commonpb.MsgType_CreatePartition,
                MsgID:     0,
                Timestamp: 0,
                SourceID:  paramtable.GetNodeID(),
            },
            DbName:         dbName,
            CollectionName: collectionName,
            PartitionName:  partitionName,
        })
        assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode())
        assert.NoError(t, err)
    })

    fieldData := generateFieldData(schemapb.DataType_Int64, testInt64Field, nb)
    insertMsg := &BaseInsertTask{
        BaseMsg: msgstream.BaseMsg{
            HashValues: hash,
        },
        InsertRequest: msgpb.InsertRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_Insert,
                MsgID:    0,
                SourceID: paramtable.GetNodeID(),
            },
            DbName:         dbName,
            CollectionName: collectionName,
            PartitionName:  partitionName,
            NumRows:        uint64(nb),
            FieldsData:     []*schemapb.FieldData{fieldData},
            Version:        msgpb.InsertDataVersion_ColumnBased,
        },
    }
    insertMsg.Timestamps = make([]uint64, nb)
    for index := range insertMsg.Timestamps {
        insertMsg.Timestamps[index] = insertMsg.BeginTimestamp
    }
    insertMsg.RowIDs = make([]UniqueID, nb)
    for index := range insertMsg.RowIDs {
        insertMsg.RowIDs[index] = int64(index)
    }

    ids, err := parsePrimaryFieldData2IDs(fieldData)
    assert.NoError(t, err)
    result := &milvuspb.MutationResult{
        IDs: ids,
    }

    t.Run("assign segmentID failed", func(t *testing.T) {
        fakeSegAllocator, err := newSegIDAssigner(ctx, &mockDataCoord2{expireTime: Timestamp(2500)}, getLastTick1)
        assert.NoError(t, err)
        _ = fakeSegAllocator.Start()
        defer fakeSegAllocator.Close()

        _, err = repackInsertData(ctx, []string{"test_dml_channel"}, insertMsg,
            result, idAllocator, fakeSegAllocator)
        assert.Error(t, err)
    })

    segAllocator, err := newSegIDAssigner(ctx, &mockDataCoord{expireTime: Timestamp(2500)}, getLastTick1)
    assert.NoError(t, err)
    _ = segAllocator.Start()
    defer segAllocator.Close()

    t.Run("repack insert data success", func(t *testing.T) {
        _, err = repackInsertData(ctx, []string{"test_dml_channel"}, insertMsg, result, idAllocator, segAllocator)
        assert.NoError(t, err)
    })
}

func TestRepackInsertDataWithPartitionKey(t *testing.T) {
    nb := 10
    hash := generateHashKeys(nb)
    prefix := "TestRepackInsertData"
    dbName := ""
    collectionName := prefix + funcutil.GenRandomStr()

    ctx := context.Background()

    rc := NewRootCoordMock()
    rc.Start()
    defer rc.Stop()

    err := InitMetaCache(ctx, rc, nil, nil)
    assert.NoError(t, err)

    idAllocator, err := allocator.NewIDAllocator(ctx, rc, paramtable.GetNodeID())
    assert.NoError(t, err)
    _ = idAllocator.Start()
    defer idAllocator.Close()

    segAllocator, err := newSegIDAssigner(ctx, &mockDataCoord{expireTime: Timestamp(2500)}, getLastTick1)
    assert.NoError(t, err)
    _ = segAllocator.Start()
    defer segAllocator.Close()

    fieldName2Types := map[string]schemapb.DataType{
        testInt64Field:    schemapb.DataType_Int64,
        testVarCharField:  schemapb.DataType_VarChar,
        testFloatVecField: schemapb.DataType_FloatVector}

    t.Run("create collection with partition key", func(t *testing.T) {
        schema := ConstructCollectionSchemaWithPartitionKey(collectionName, fieldName2Types, testInt64Field, testVarCharField, false)
        marshaledSchema, err := proto.Marshal(schema)
        assert.NoError(t, err)

        resp, err := rc.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
            Base:           nil,
            DbName:         dbName,
            CollectionName: collectionName,
            Schema:         marshaledSchema,
            NumPartitions:  100,
        })
        assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode())
        assert.NoError(t, err)
    })

    fieldNameToDatas := make(map[string]*schemapb.FieldData)
    fieldDatas := make([]*schemapb.FieldData, 0)
    for name, dataType := range fieldName2Types {
        data := generateFieldData(dataType, name, nb)
        fieldNameToDatas[name] = data
        fieldDatas = append(fieldDatas, data)
    }

    insertMsg := &BaseInsertTask{
        BaseMsg: msgstream.BaseMsg{
            HashValues: hash,
        },
        InsertRequest: msgpb.InsertRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_Insert,
                MsgID:    0,
                SourceID: paramtable.GetNodeID(),
            },
            DbName:         dbName,
            CollectionName: collectionName,
            NumRows:        uint64(nb),
            FieldsData:     fieldDatas,
            Version:        msgpb.InsertDataVersion_ColumnBased,
        },
    }
    insertMsg.Timestamps = make([]uint64, nb)
    for index := range insertMsg.Timestamps {
        insertMsg.Timestamps[index] = insertMsg.BeginTimestamp
    }
    insertMsg.RowIDs = make([]UniqueID, nb)
    for index := range insertMsg.RowIDs {
        insertMsg.RowIDs[index] = int64(index)
    }

    ids, err := parsePrimaryFieldData2IDs(fieldNameToDatas[testInt64Field])
    assert.NoError(t, err)
    result := &milvuspb.MutationResult{
        IDs: ids,
    }

    t.Run("repack insert data success", func(t *testing.T) {
        partitionKeys := generateFieldData(schemapb.DataType_VarChar, testVarCharField, nb)
        _, err = repackInsertDataWithPartitionKey(ctx, []string{"test_dml_channel"}, partitionKeys,
            insertMsg, result, idAllocator, segAllocator)
        assert.NoError(t, err)
    })
}

@ -280,6 +280,7 @@ func createColumnInfo(field *schemapb.FieldSchema) *planpb.ColumnInfo {
        FieldId:        field.FieldID,
        DataType:       field.DataType,
        IsPrimaryKey:   field.IsPrimaryKey,
        IsPartitionKey: field.IsPartitionKey,
    }
}

@ -372,6 +372,26 @@ func (coord *RootCoordMock) CreateCollection(ctx context.Context, req *milvuspb.
        partitionID2Meta: make(map[typeutil.UniqueID]partitionMeta),
    }

    idGenerator := uniquegenerator.GetUniqueIntGeneratorIns()
    defaultPartitionName := Params.CommonCfg.DefaultPartitionName.GetValue()
    _, err = typeutil.GetPartitionKeyFieldSchema(&schema)
    if err == nil {
        partitionNums := req.GetNumPartitions()
        for i := int64(0); i < partitionNums; i++ {
            partitionName := fmt.Sprintf("%s_%d", defaultPartitionName, i)
            id := UniqueID(idGenerator.GetInt())
            coord.collID2Partitions[collID].partitionName2ID[partitionName] = id
            coord.collID2Partitions[collID].partitionID2Name[id] = partitionName
            coord.collID2Partitions[collID].partitionID2Meta[id] = partitionMeta{}
        }
    } else {
        id := UniqueID(idGenerator.GetInt())
        coord.collID2Partitions[collID].partitionName2ID[defaultPartitionName] = id
        coord.collID2Partitions[collID].partitionID2Name[id] = defaultPartitionName
        coord.collID2Partitions[collID].partitionID2Meta[id] = partitionMeta{}
    }

    return &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
        Reason:    "",

@ -158,6 +158,49 @@ func (cct *createCollectionTask) OnEnqueue() error {
    return nil
}

func (cct *createCollectionTask) validatePartitionKey() error {
    idx := -1
    for i, field := range cct.schema.Fields {
        if field.GetIsPartitionKey() {
            if idx != -1 {
                return fmt.Errorf("there are more than one partition key, field name = %s, %s", cct.schema.Fields[idx].Name, field.Name)
            }

            if field.GetIsPrimaryKey() {
                return errors.New("the partition key field must not be primary field")
            }

            // The type of the partition key field can only be int64 and varchar
            if field.DataType != schemapb.DataType_Int64 && field.DataType != schemapb.DataType_VarChar {
                return errors.New("the data type of partition key should be Int64 or VarChar")
            }

            if cct.GetNumPartitions() < 0 {
                return errors.New("the specified partitions should be greater than 0 if partition key is used")
            }

            // set default physical partitions num if enable partition key mode
            if cct.GetNumPartitions() == 0 {
                cct.NumPartitions = common.DefaultPartitionsWithPartitionKey
            }

            idx = i
        }
    }

    if idx == -1 {
        if cct.GetNumPartitions() != 0 {
            return fmt.Errorf("num_partitions should only be specified with partition key field enabled")
        }
    } else {
        log.Info("create collection with partition key mode",
            zap.String("collectionName", cct.CollectionName),
            zap.Int64("numDefaultPartitions", cct.GetNumPartitions()))
    }

    return nil
}
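
For reference, a minimal schema that passes all of these checks: exactly one partition key field, distinct from the primary key, and typed Int64 or VarChar. Field IDs and names here are illustrative only:

schema := &schemapb.CollectionSchema{
    Name: "demo",
    Fields: []*schemapb.FieldSchema{
        {FieldID: 100, Name: "pk", DataType: schemapb.DataType_Int64, IsPrimaryKey: true},
        {FieldID: 101, Name: "tenant_id", DataType: schemapb.DataType_Int64, IsPartitionKey: true}, // the single partition key
        {FieldID: 102, Name: "fvec", DataType: schemapb.DataType_FloatVector},
    },
}
// NumPartitions may stay 0: validatePartitionKey fills in common.DefaultPartitionsWithPartitionKey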

func (cct *createCollectionTask) PreExecute(ctx context.Context) error {
    cct.Base.MsgType = commonpb.MsgType_CreateCollection
    cct.Base.SourceID = paramtable.GetNodeID()
@ -207,6 +250,11 @@ func (cct *createCollectionTask) PreExecute(ctx context.Context) error {
        return err
    }

    // validate partition key mode
    if err := cct.validatePartitionKey(); err != nil {
        return err
    }

    for _, field := range cct.schema.Fields {
        // validate field name
        if err := validateFieldName(field.Name); err != nil {
@ -488,6 +536,7 @@ func (dct *describeCollectionTask) Execute(ctx context.Context) error {
        dct.result.ConsistencyLevel = result.ConsistencyLevel
        dct.result.Aliases = result.Aliases
        dct.result.Properties = result.Properties
        dct.result.NumPartitions = result.NumPartitions
        for _, field := range result.Schema.Fields {
            if field.IsDynamic {
                continue
@ -503,6 +552,7 @@ func (dct *describeCollectionTask) Execute(ctx context.Context) error {
                TypeParams:     field.TypeParams,
                IndexParams:    field.IndexParams,
                IsDynamic:      field.IsDynamic,
                IsPartitionKey: field.IsPartitionKey,
                DefaultValue:   field.DefaultValue,
            })
        }
@ -793,6 +843,14 @@ func (cpt *createPartitionTask) PreExecute(ctx context.Context) error {
        return err
    }

    partitionKeyMode, err := isPartitionKeyMode(ctx, collName)
    if err != nil {
        return err
    }
    if partitionKeyMode {
        return errors.New("disable create partition if partition key mode is used")
    }

    if err := validatePartitionTag(partitionTag, true); err != nil {
        return err
    }
@ -871,6 +929,14 @@ func (dpt *dropPartitionTask) PreExecute(ctx context.Context) error {
        return err
    }

    partitionKeyMode, err := isPartitionKeyMode(ctx, collName)
    if err != nil {
        return err
    }
    if partitionKeyMode {
        return errors.New("disable drop partition if partition key mode is used")
    }

    if err := validatePartitionTag(partitionTag, true); err != nil {
        return err
    }
@ -1546,6 +1612,14 @@ func (lpt *loadPartitionsTask) PreExecute(ctx context.Context) error {
        return err
    }

    partitionKeyMode, err := isPartitionKeyMode(ctx, collName)
    if err != nil {
        return err
    }
    if partitionKeyMode {
        return errors.New("disable load partitions if partition key mode is used")
    }

    return nil
}
@ -1676,6 +1750,14 @@ func (rpt *releasePartitionsTask) PreExecute(ctx context.Context) error {
        return err
    }

    partitionKeyMode, err := isPartitionKeyMode(ctx, collName)
    if err != nil {
        return err
    }
    if partitionKeyMode {
        return errors.New("disable release partitions if partition key mode is used")
    }

    return nil
}

@ -175,6 +175,14 @@ func (dt *deleteTask) PreExecute(ctx context.Context) error {
    dt.deleteMsg.CollectionID = collID
    dt.collectionID = collID

    partitionKeyMode, err := isPartitionKeyMode(ctx, collName)
    if err != nil {
        return err
    }
    if partitionKeyMode && len(dt.deleteMsg.PartitionName) != 0 {
        return errors.New("not support manually specifying the partition names if partition key mode is used")
    }

    // If partitionName is not empty, partitionID will be set.
    if len(dt.deleteMsg.PartitionName) > 0 {
        partName := dt.deleteMsg.PartitionName

@ -36,6 +36,7 @@ type insertTask struct {
    vChannels     []vChan
    pChannels     []pChan
    schema        *schemapb.CollectionSchema
    partitionKeys *schemapb.FieldData
}

// TraceCtx returns insertTask context
@ -108,13 +109,7 @@ func (it *insertTask) PreExecute(ctx context.Context) error {

    collectionName := it.insertMsg.CollectionName
    if err := validateCollectionName(collectionName); err != nil {
        log.Error("valid collection name failed", zap.String("collectionName", collectionName), zap.Error(err))
        return err
    }

    partitionTag := it.insertMsg.PartitionName
    if err := validatePartitionTag(partitionTag, true); err != nil {
        log.Error("valid partition name failed", zap.String("partition name", partitionTag), zap.Error(err))
        log.Info("valid collection name failed", zap.String("collectionName", collectionName), zap.Error(err))
        return err
    }

@ -173,11 +168,38 @@ func (it *insertTask) PreExecute(ctx context.Context) error {
    // set field ID to insert field data
    err = fillFieldIDBySchema(it.insertMsg.GetFieldsData(), schema)
    if err != nil {
        log.Error("set fieldID to fieldData failed",
        log.Info("set fieldID to fieldData failed",
            zap.Error(err))
        return err
    }

    partitionKeyMode, err := isPartitionKeyMode(ctx, collectionName)
    if err != nil {
        log.Warn("check partition key mode failed", zap.String("collection name", collectionName), zap.Error(err))
        return err
    }
    if partitionKeyMode {
        fieldSchema, _ := typeutil.GetPartitionKeyFieldSchema(it.schema)
        it.partitionKeys, err = getPartitionKeyFieldData(fieldSchema, it.insertMsg)
        if err != nil {
            log.Info("get partition keys from insert request failed", zap.String("collection name", collectionName), zap.Error(err))
            return err
        }
    } else {
        // set the default partition name if the partition key is not used;
        // insert into the _default partition
        partitionTag := it.insertMsg.GetPartitionName()
        if len(partitionTag) <= 0 {
            partitionTag = Params.CommonCfg.DefaultPartitionName.GetValue()
            it.insertMsg.PartitionName = partitionTag
        }

        if err := validatePartitionTag(partitionTag, true); err != nil {
            log.Info("valid partition name failed", zap.String("partition name", partitionTag), zap.Error(err))
            return err
        }
    }

    if err := newValidateUtil(withNANCheck(), withOverflowCheck()).
        Validate(it.insertMsg.GetFieldsData(), schema, it.insertMsg.NRows()); err != nil {
        return err
@ -200,19 +222,7 @@ func (it *insertTask) Execute(ctx context.Context) error {
        return err
    }
    it.insertMsg.CollectionID = collID
    var partitionID UniqueID
    if len(it.insertMsg.PartitionName) > 0 {
        partitionID, err = globalMetaCache.GetPartitionID(ctx, collectionName, it.insertMsg.PartitionName)
        if err != nil {
            return err
        }
    } else {
        partitionID, err = globalMetaCache.GetPartitionID(ctx, collectionName, Params.CommonCfg.DefaultPartitionName.GetValue())
        if err != nil {
            return err
        }
    }
    it.insertMsg.PartitionID = partitionID

    getCacheDur := tr.RecordSpan()
    stream, err := it.chMgr.getOrCreateDmlStream(collID)
    if err != nil {
@ -234,7 +244,6 @@ func (it *insertTask) Execute(ctx context.Context) error {
        zap.String("collection", it.insertMsg.GetCollectionName()),
        zap.String("partition", it.insertMsg.GetPartitionName()),
        zap.Int64("collection_id", collID),
        zap.Int64("partition_id", partitionID),
        zap.Strings("virtual_channels", channelNames),
        zap.Int64("task_id", it.ID()),
        zap.Duration("get cache duration", getCacheDur),
@ -242,7 +251,11 @@ func (it *insertTask) Execute(ctx context.Context) error {

    // assign segmentID for insert data and repack data by segmentID
    var msgPack *msgstream.MsgPack
    msgPack, err = assignSegmentID(it.TraceCtx(), it.insertMsg, it.result, channelNames, it.idAllocator, it.segIDAssigner)
    if it.partitionKeys == nil {
        msgPack, err = repackInsertData(it.TraceCtx(), channelNames, it.insertMsg, it.result, it.idAllocator, it.segIDAssigner)
    } else {
        msgPack, err = repackInsertDataWithPartitionKey(it.TraceCtx(), channelNames, it.partitionKeys, it.insertMsg, it.result, it.idAllocator, it.segIDAssigner)
    }
    if err != nil {
        log.Error("assign segmentID and repack insert data failed",
            zap.Int64("collectionID", collID),

@ -64,6 +64,7 @@ type queryTask struct {
    shardMgr *shardClientMgr

    plan             *planpb.PlanNode
    partitionKeyMode bool
}

type queryParams struct {
@ -225,9 +226,6 @@ func (t *queryTask) createPlan(ctx context.Context) error {
    if err != nil {
        return err
    }
    log.Ctx(ctx).Debug("translate output fields",
        zap.Strings("OutputFields", t.request.OutputFields),
        zap.String("requestType", "query"))

    outputFieldIDs, err := translateToOutputFieldIDs(t.request.GetOutputFields(), schema)
    if err != nil {
@ -273,6 +271,15 @@ func (t *queryTask) PreExecute(ctx context.Context) error {
    t.CollectionID = collID
    log.Debug("Get collection ID by name", zap.Int64("collectionID", t.CollectionID))

    t.partitionKeyMode, err = isPartitionKeyMode(ctx, collectionName)
    if err != nil {
        log.Warn("check partition key mode failed", zap.Int64("collectionID", t.CollectionID))
        return err
    }
    if t.partitionKeyMode && len(t.request.GetPartitionNames()) != 0 {
        return errors.New("not support manually specifying the partition names if partition key mode is used")
    }

    for _, tag := range t.request.PartitionNames {
        if err := validatePartitionTag(tag, false); err != nil {
            log.Warn("invalid partition name", zap.String("partition name", tag))
@ -281,13 +288,6 @@ func (t *queryTask) PreExecute(ctx context.Context) error {
    }
    log.Debug("Validate partition names.")

    t.RetrieveRequest.PartitionIDs, err = getPartitionIDs(ctx, collectionName, t.request.GetPartitionNames())
    if err != nil {
        log.Warn("failed to get partitions in collection.", zap.Error(err))
        return err
    }
    log.Debug("Get partitions in collection.", zap.Int64s("partitionIDs", t.RetrieveRequest.GetPartitionIDs()))

    //fetch search_growing from query param
    var ignoreGrowing bool
    for i, kv := range t.request.GetQueryParams() {
@ -340,6 +340,25 @@ func (t *queryTask) PreExecute(ctx context.Context) error {
        return err
    }

    partitionNames := t.request.GetPartitionNames()
    if t.partitionKeyMode {
        expr, err := ParseExprFromPlan(t.plan)
        if err != nil {
            return err
        }
        partitionKeys := ParsePartitionKeys(expr)
        hashedPartitionNames, err := assignPartitionKeys(ctx, t.request.CollectionName, partitionKeys)
        if err != nil {
            return err
        }

        partitionNames = append(partitionNames, hashedPartitionNames...)
    }
    t.RetrieveRequest.PartitionIDs, err = getPartitionIDs(ctx, t.request.CollectionName, partitionNames)
    if err != nil {
        return err
    }

    // count with pagination
    if t.plan.GetQuery().GetIsCount() && t.queryParams.limit != typeutil.Unlimited {
        return fmt.Errorf("count entities with pagination is not allowed")

@ -229,11 +229,13 @@ func (t *searchTask) PreExecute(ctx context.Context) error {
    t.SearchRequest.CollectionID = collID
    t.schema, _ = globalMetaCache.GetCollectionSchema(ctx, collectionName)

    // translate partition name to partition ids. Use regex-pattern to match partition name.
    t.SearchRequest.PartitionIDs, err = getPartitionIDs(ctx, collectionName, t.request.GetPartitionNames())
    partitionKeyMode, err := isPartitionKeyMode(ctx, collectionName)
    if err != nil {
        return err
    }
    if partitionKeyMode && len(t.request.GetPartitionNames()) != 0 {
        return errors.New("not support manually specifying the partition names if partition key mode is used")
    }

    t.request.OutputFields, t.userOutputFields, err = translateOutputFields(t.request.OutputFields, t.schema, false)
    if err != nil {
@ -274,6 +276,7 @@ func (t *searchTask) PreExecute(ctx context.Context) error {
    }
    t.SearchRequest.OutputFieldsId = outputFieldIDs

    partitionNames := t.request.GetPartitionNames()
    if t.request.GetDslType() == commonpb.DslType_BoolExprV1 {
        annsField, err := funcutil.GetAttrByKeyFromRepeatedKV(AnnsFieldKey, t.request.GetSearchParams())
        if err != nil || len(annsField) == 0 {
@ -303,6 +306,20 @@ func (t *searchTask) PreExecute(ctx context.Context) error {
            zap.String("dsl", t.request.Dsl), // may be very large if large term passed.
            zap.String("anns field", annsField), zap.Any("query info", queryInfo))

        if partitionKeyMode {
            expr, err := ParseExprFromPlan(plan)
            if err != nil {
                return err
            }
            partitionKeys := ParsePartitionKeys(expr)
            hashedPartitionNames, err := assignPartitionKeys(ctx, collectionName, partitionKeys)
            if err != nil {
                return err
            }

            partitionNames = append(partitionNames, hashedPartitionNames...)
        }

        plan.OutputFieldIds = outputFieldIDs

        t.SearchRequest.Topk = queryInfo.GetTopk()
@ -328,6 +345,12 @@ func (t *searchTask) PreExecute(ctx context.Context) error {
            zap.String("plan", plan.String())) // may be very large if large term passed.
    }

    // translate partition name to partition ids. Use regex-pattern to match partition name.
    t.SearchRequest.PartitionIDs, err = getPartitionIDs(ctx, collectionName, partitionNames)
    if err != nil {
        return err
    }

    travelTimestamp := t.request.TravelTimestamp
    if travelTimestamp == 0 {
        travelTimestamp = typeutil.MaxTimestamp

@ -195,6 +195,7 @@ func TestStatisticTask_all(t *testing.T) {

    task.statisticShardPolicy = RoundRobinPolicy
    task.fromQueryNode = true
    task.fromDataCoord = false
    qn.EXPECT().GetStatistics(mock.Anything, mock.Anything).Return(nil, nil).Once()
    assert.NoError(t, task.Execute(ctx))
    assert.NoError(t, task.PostExecute(ctx))

@ -21,6 +21,7 @@ import (
    "context"
    "encoding/binary"
    "encoding/json"

    "math/rand"
    "strconv"
    "testing"
@ -38,14 +39,17 @@ import (
    "github.com/milvus-io/milvus/internal/allocator"
    "github.com/milvus-io/milvus/internal/mocks"
    "github.com/milvus-io/milvus/internal/proto/indexpb"
    "github.com/milvus-io/milvus/internal/proto/internalpb"
    "github.com/milvus-io/milvus/internal/proto/querypb"
    "github.com/milvus-io/milvus/internal/querycoordv2/meta"
    "github.com/milvus-io/milvus/pkg/common"
    "github.com/milvus-io/milvus/pkg/mq/msgstream"
    "github.com/milvus-io/milvus/pkg/util/commonpbutil"
    "github.com/milvus-io/milvus/pkg/util/distance"
    "github.com/milvus-io/milvus/pkg/util/funcutil"
    "github.com/milvus-io/milvus/pkg/util/merr"
    "github.com/milvus-io/milvus/pkg/util/paramtable"
    "github.com/milvus-io/milvus/pkg/util/timerecord"
    "github.com/milvus-io/milvus/pkg/util/typeutil"
    "github.com/milvus-io/milvus/pkg/util/uniquegenerator"
)
@ -151,6 +155,17 @@ func constructCollectionSchemaEnableDynamicSchema(
    }
}

func ConstructCollectionSchemaWithPartitionKey(collectionName string, fieldName2DataType map[string]schemapb.DataType, primaryFieldName string, partitionKeyFieldName string, autoID bool) *schemapb.CollectionSchema {
    schema := constructCollectionSchemaByDataType(collectionName, fieldName2DataType, primaryFieldName, autoID)
    for _, field := range schema.Fields {
        if field.Name == partitionKeyFieldName {
            field.IsPartitionKey = true
        }
    }

    return schema
}
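
The helper is exercised by msg_pack_test.go above; a typical call marks testVarCharField as the partition key while testInt64Field remains the primary key and autoID stays disabled:

schema := ConstructCollectionSchemaWithPartitionKey(collectionName, fieldName2Types, testInt64Field, testVarCharField, false)
// schema now carries exactly one field with IsPartitionKey = true (testVarCharField)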

func constructCollectionSchemaByDataType(collectionName string, fieldName2DataType map[string]schemapb.DataType, primaryFieldName string, autoID bool) *schemapb.CollectionSchema {
    fieldsSchema := make([]*schemapb.FieldSchema, 0)

@ -3029,3 +3044,493 @@ func TestDescribeResourceGroupTaskFailed(t *testing.T) {
    assert.Nil(t, err)
    assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.Status.ErrorCode)
}

func TestCreateCollectionTaskWithPartitionKey(t *testing.T) {
    rc := NewRootCoordMock()
    rc.Start()
    defer rc.Stop()
    ctx := context.Background()
    shardsNum := common.DefaultShardsNum
    prefix := "TestCreateCollectionTaskWithPartitionKey"
    dbName := ""
    collectionName := prefix + funcutil.GenRandomStr()

    int64Field := &schemapb.FieldSchema{
        Name:         "int64",
        DataType:     schemapb.DataType_Int64,
        IsPrimaryKey: true,
    }
    varCharField := &schemapb.FieldSchema{
        Name:     "varChar",
        DataType: schemapb.DataType_VarChar,
        TypeParams: []*commonpb.KeyValuePair{
            {
                Key:   "max_length",
                Value: strconv.Itoa(testMaxVarCharLength),
            },
        },
    }
    floatVecField := &schemapb.FieldSchema{
        Name:     "fvec",
        DataType: schemapb.DataType_FloatVector,
        TypeParams: []*commonpb.KeyValuePair{
            {
                Key:   "dim",
                Value: strconv.Itoa(testVecDim),
            },
        },
    }
    partitionKeyField := &schemapb.FieldSchema{
        Name:           "partition_key",
        DataType:       schemapb.DataType_Int64,
        IsPartitionKey: true,
    }
    schema := &schemapb.CollectionSchema{
        Name:   collectionName,
        Fields: []*schemapb.FieldSchema{int64Field, varCharField, partitionKeyField, floatVecField},
    }

    marshaledSchema, err := proto.Marshal(schema)
    assert.NoError(t, err)

    task := &createCollectionTask{
        Condition: NewTaskCondition(ctx),
        CreateCollectionRequest: &milvuspb.CreateCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgID:     UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()),
                Timestamp: Timestamp(time.Now().UnixNano()),
            },
            DbName:         dbName,
            CollectionName: collectionName,
            Schema:         marshaledSchema,
            ShardsNum:      shardsNum,
        },
        ctx:       ctx,
        rootCoord: rc,
        result:    nil,
        schema:    nil,
    }

    t.Run("PreExecute", func(t *testing.T) {
        var err error

        // test default num partitions
        err = task.PreExecute(ctx)
        assert.NoError(t, err)
        assert.Equal(t, common.DefaultPartitionsWithPartitionKey, task.GetNumPartitions())

        // test specify num partition without partition key field
        partitionKeyField.IsPartitionKey = false
        task.NumPartitions = common.DefaultPartitionsWithPartitionKey * 2
        marshaledSchema, err = proto.Marshal(schema)
        assert.NoError(t, err)
        task.Schema = marshaledSchema
        err = task.PreExecute(ctx)
        assert.Error(t, err)
        partitionKeyField.IsPartitionKey = true

        // test multi partition key field
        varCharField.IsPartitionKey = true
        marshaledSchema, err = proto.Marshal(schema)
        assert.NoError(t, err)
        task.Schema = marshaledSchema
        err = task.PreExecute(ctx)
        assert.Error(t, err)
        varCharField.IsPartitionKey = false

        // test partitions < 0
        task.NumPartitions = -2
        marshaledSchema, err = proto.Marshal(schema)
        assert.NoError(t, err)
        task.Schema = marshaledSchema
        err = task.PreExecute(ctx)
        assert.Error(t, err)
        task.NumPartitions = 1000

        // test partition key type not in [int64, varChar]
        partitionKeyField.DataType = schemapb.DataType_FloatVector
        marshaledSchema, err = proto.Marshal(schema)
        assert.NoError(t, err)
        task.Schema = marshaledSchema
        err = task.PreExecute(ctx)
        assert.Error(t, err)
        partitionKeyField.DataType = schemapb.DataType_Int64

        // test partition key field not primary key field
        primaryField, _ := typeutil.GetPrimaryFieldSchema(schema)
        primaryField.IsPartitionKey = true
        marshaledSchema, err = proto.Marshal(schema)
        assert.NoError(t, err)
        task.Schema = marshaledSchema
        err = task.PreExecute(ctx)
        assert.Error(t, err)
        primaryField.IsPartitionKey = false

        marshaledSchema, err = proto.Marshal(schema)
        assert.NoError(t, err)
        task.Schema = marshaledSchema
        err = task.PreExecute(ctx)
        assert.NoError(t, err)
    })

    t.Run("Execute", func(t *testing.T) {
        err = task.Execute(ctx)
        assert.NoError(t, err)

        // check default partitions
        err = InitMetaCache(ctx, rc, nil, nil)
        assert.NoError(t, err)
        partitionNames, err := getDefaultPartitionNames(ctx, task.CollectionName)
        assert.NoError(t, err)
        assert.Equal(t, task.GetNumPartitions(), int64(len(partitionNames)))

        createPartitionTask := &createPartitionTask{
            Condition: NewTaskCondition(ctx),
            CreatePartitionRequest: &milvuspb.CreatePartitionRequest{
                Base: &commonpb.MsgBase{
                    MsgID:     UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()),
                    Timestamp: Timestamp(time.Now().UnixNano()),
                },
                DbName:         dbName,
                CollectionName: collectionName,
                PartitionName:  "new_partition",
            },
            ctx:       ctx,
            rootCoord: rc,
        }
        err = createPartitionTask.PreExecute(ctx)
        assert.Error(t, err)

        dropPartitionTask := &dropPartitionTask{
            Condition: NewTaskCondition(ctx),
            DropPartitionRequest: &milvuspb.DropPartitionRequest{
                Base: &commonpb.MsgBase{
                    MsgID:     UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()),
                    Timestamp: Timestamp(time.Now().UnixNano()),
                },
                DbName:         dbName,
                CollectionName: collectionName,
                PartitionName:  "new_partition",
            },
            ctx:       ctx,
            rootCoord: rc,
        }
        err = dropPartitionTask.PreExecute(ctx)
        assert.Error(t, err)

        loadPartitionTask := &loadPartitionsTask{
            Condition: NewTaskCondition(ctx),
            LoadPartitionsRequest: &milvuspb.LoadPartitionsRequest{
                Base: &commonpb.MsgBase{
                    MsgID:     UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()),
                    Timestamp: Timestamp(time.Now().UnixNano()),
                },
                DbName:         dbName,
                CollectionName: collectionName,
                PartitionNames: []string{"_default_0"},
            },
            ctx: ctx,
        }
        err = loadPartitionTask.PreExecute(ctx)
        assert.Error(t, err)

        releasePartitionsTask := &releasePartitionsTask{
            Condition: NewTaskCondition(ctx),
            ReleasePartitionsRequest: &milvuspb.ReleasePartitionsRequest{
                Base: &commonpb.MsgBase{
                    MsgID:     UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()),
                    Timestamp: Timestamp(time.Now().UnixNano()),
                },
                DbName:         dbName,
                CollectionName: collectionName,
                PartitionNames: []string{"_default_0"},
            },
            ctx: ctx,
        }
        err = releasePartitionsTask.PreExecute(ctx)
        assert.Error(t, err)
    })
}

func TestPartitionKey(t *testing.T) {
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
qc := getQueryCoord()
qc.Start()
defer qc.Stop()

ctx := context.Background()

mgr := newShardClientMgr()
err := InitMetaCache(ctx, rc, qc, mgr)
assert.NoError(t, err)

shardsNum := common.DefaultShardsNum
prefix := "TestInsertTaskWithPartitionKey"
collectionName := prefix + funcutil.GenRandomStr()

fieldName2Type := make(map[string]schemapb.DataType)
fieldName2Type["int64_field"] = schemapb.DataType_Int64
fieldName2Type["varChar_field"] = schemapb.DataType_VarChar
fieldName2Type["fvec_field"] = schemapb.DataType_FloatVector
schema := constructCollectionSchemaByDataType(collectionName, fieldName2Type, "int64_field", false)
partitionKeyField := &schemapb.FieldSchema{
Name: "partition_key_field",
DataType: schemapb.DataType_Int64,
IsPartitionKey: true,
}
fieldName2Type["partition_key_field"] = schemapb.DataType_Int64
schema.Fields = append(schema.Fields, partitionKeyField)
marshaledSchema, err := proto.Marshal(schema)
assert.NoError(t, err)

t.Run("create collection", func(t *testing.T) {
createCollectionTask := &createCollectionTask{
Condition: NewTaskCondition(ctx),
CreateCollectionRequest: &milvuspb.CreateCollectionRequest{
Base: &commonpb.MsgBase{
MsgID: UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt()),
Timestamp: Timestamp(time.Now().UnixNano()),
},
DbName: "",
CollectionName: collectionName,
Schema: marshaledSchema,
ShardsNum: shardsNum,
NumPartitions: common.DefaultPartitionsWithPartitionKey,
},
ctx: ctx,
rootCoord: rc,
result: nil,
schema: nil,
}
err = createCollectionTask.PreExecute(ctx)
assert.NoError(t, err)
err = createCollectionTask.Execute(ctx)
assert.NoError(t, err)
})

collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
assert.NoError(t, err)

dmlChannelsFunc := getDmlChannelsFunc(ctx, rc)
factory := newSimpleMockMsgStreamFactory()
chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, factory)
defer chMgr.removeAllDMLStream()

_, err = chMgr.getOrCreateDmlStream(collectionID)
assert.NoError(t, err)
pchans, err := chMgr.getChannels(collectionID)
assert.NoError(t, err)

interval := time.Millisecond * 10
tso := newMockTsoAllocator()

ticker := newChannelsTimeTicker(ctx, interval, []string{}, newGetStatisticsFunc(pchans), tso)
_ = ticker.start()
defer ticker.close()

idAllocator, err := allocator.NewIDAllocator(ctx, rc, paramtable.GetNodeID())
assert.NoError(t, err)
_ = idAllocator.Start()
defer idAllocator.Close()

segAllocator, err := newSegIDAssigner(ctx, &mockDataCoord{expireTime: Timestamp(2500)}, getLastTick1)
assert.NoError(t, err)
segAllocator.Init()
_ = segAllocator.Start()
defer segAllocator.Close()

partitionNames, err := getDefaultPartitionNames(ctx, collectionName)
assert.NoError(t, err)
assert.Equal(t, common.DefaultPartitionsWithPartitionKey, int64(len(partitionNames)))

nb := 10
fieldID := common.StartOfUserFieldID
fieldDatas := make([]*schemapb.FieldData, 0)
for fieldName, dataType := range fieldName2Type {
fieldData := generateFieldData(dataType, fieldName, nb)
fieldData.FieldId = int64(fieldID)
fieldDatas = append(fieldDatas, fieldData)
fieldID++
}

t.Run("Insert", func(t *testing.T) {
it := &insertTask{
insertMsg: &BaseInsertTask{
BaseMsg: msgstream.BaseMsg{},
InsertRequest: msgpb.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Insert,
MsgID: 0,
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
FieldsData: fieldDatas,
NumRows: uint64(nb),
Version: msgpb.InsertDataVersion_ColumnBased,
},
},

Condition: NewTaskCondition(ctx),
ctx: ctx,
result: &milvuspb.MutationResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
IDs: nil,
SuccIndex: nil,
ErrIndex: nil,
Acknowledged: false,
InsertCnt: 0,
DeleteCnt: 0,
UpsertCnt: 0,
Timestamp: 0,
},
idAllocator: idAllocator,
segIDAssigner: segAllocator,
chMgr: chMgr,
chTicker: ticker,
vChannels: nil,
pChannels: nil,
schema: nil,
}

// specifying a partition name is not supported when the partition key is used
it.insertMsg.PartitionName = partitionNames[0]
assert.Error(t, it.PreExecute(ctx))

it.insertMsg.PartitionName = ""
assert.NoError(t, it.OnEnqueue())
assert.NoError(t, it.PreExecute(ctx))
assert.NoError(t, it.Execute(ctx))
assert.NoError(t, it.PostExecute(ctx))
})

t.Run("Upsert", func(t *testing.T) {
hash := generateHashKeys(nb)
ut := &upsertTask{
ctx: ctx,
Condition: NewTaskCondition(ctx),
baseMsg: msgstream.BaseMsg{
HashValues: hash,
},
req: &milvuspb.UpsertRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_Upsert),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionName: collectionName,
FieldsData: fieldDatas,
NumRows: uint32(nb),
},

result: &milvuspb.MutationResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
IDs: &schemapb.IDs{
IdField: nil,
},
},
idAllocator: idAllocator,
segIDAssigner: segAllocator,
chMgr: chMgr,
chTicker: ticker,
}

// specifying a partition name is not supported when the partition key is used
ut.req.PartitionName = partitionNames[0]
assert.Error(t, ut.PreExecute(ctx))

ut.req.PartitionName = ""
assert.NoError(t, ut.OnEnqueue())
assert.NoError(t, ut.PreExecute(ctx))
assert.NoError(t, ut.Execute(ctx))
assert.NoError(t, ut.PostExecute(ctx))
})

t.Run("delete", func(t *testing.T) {
dt := &deleteTask{
Condition: NewTaskCondition(ctx),
deleteMsg: &BaseDeleteTask{
BaseMsg: msgstream.BaseMsg{},
DeleteRequest: msgpb.DeleteRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Delete,
MsgID: 0,
Timestamp: 0,
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
},
},
deleteExpr: "int64_field in [0, 1]",
ctx: ctx,
result: &milvuspb.MutationResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
IDs: nil,
SuccIndex: nil,
ErrIndex: nil,
Acknowledged: false,
InsertCnt: 0,
DeleteCnt: 0,
UpsertCnt: 0,
Timestamp: 0,
},
idAllocator: idAllocator,
chMgr: chMgr,
chTicker: ticker,
}
// specifying a partition name is not supported when the partition key is used
dt.deleteMsg.PartitionName = partitionNames[0]
assert.Error(t, dt.PreExecute(ctx))

dt.deleteMsg.PartitionName = ""
assert.NoError(t, dt.PreExecute(ctx))
assert.NoError(t, dt.Execute(ctx))
assert.NoError(t, dt.PostExecute(ctx))
})

t.Run("search", func(t *testing.T) {
searchTask := &searchTask{
ctx: ctx,
SearchRequest: &internalpb.SearchRequest{
Base: &commonpb.MsgBase{},
},
request: &milvuspb.SearchRequest{
CollectionName: collectionName,
Nq: 1,
},
qc: qc,
tr: timerecord.NewTimeRecorder("test-search"),
}

// specifying a partition name is not supported when the partition key is used
searchTask.request.PartitionNames = partitionNames
err = searchTask.PreExecute(ctx)
assert.Error(t, err)
})

t.Run("query", func(t *testing.T) {
queryTask := &queryTask{
ctx: ctx,
RetrieveRequest: &internalpb.RetrieveRequest{
Base: &commonpb.MsgBase{},
},
request: &milvuspb.QueryRequest{
CollectionName: collectionName,
},
qc: qc,
}

// specifying a partition name is not supported when the partition key is used
queryTask.request.PartitionNames = partitionNames
err = queryTask.PreExecute(ctx)
assert.Error(t, err)
})
}

@ -59,6 +59,8 @@ type upsertTask struct {
vChannels []vChan
pChannels []pChan
schema *schemapb.CollectionSchema
partitionKeyMode bool
partitionKeys *schemapb.FieldData
}

// TraceCtx returns upsertTask context
@ -142,12 +144,6 @@ func (it *upsertTask) insertPreExecute(ctx context.Context) error {
return err
}

partitionTag := it.upsertMsg.InsertMsg.PartitionName
if err := validatePartitionTag(partitionTag, true); err != nil {
log.Error("valid partition name failed", zap.String("partition name", partitionTag), zap.Error(err))
return err
}

rowNums := uint32(it.upsertMsg.InsertMsg.NRows())
// set upsertTask.insertRequest.rowIDs
tr := timerecord.NewTimeRecorder("applyPK")
@ -194,6 +190,23 @@ func (it *upsertTask) insertPreExecute(ctx context.Context) error {
return err
}

if it.partitionKeyMode {
fieldSchema, _ := typeutil.GetPartitionKeyFieldSchema(it.schema)
it.partitionKeys, err = getPartitionKeyFieldData(fieldSchema, it.upsertMsg.InsertMsg)
if err != nil {
log.Info("get partition keys from insert request failed",
zap.String("collectionName", collectionName),
zap.Error(err))
return err
}
} else {
partitionTag := it.upsertMsg.InsertMsg.PartitionName
if err = validatePartitionTag(partitionTag, true); err != nil {
log.Error("valid partition name failed", zap.String("partition name", partitionTag), zap.Error(err))
return err
}
}

if err := newValidateUtil(withNANCheck(), withOverflowCheck()).
Validate(it.upsertMsg.InsertMsg.GetFieldsData(), it.schema, it.upsertMsg.InsertMsg.NRows()); err != nil {
return err
@ -221,8 +234,13 @@ func (it *upsertTask) deletePreExecute(ctx context.Context) error {
it.upsertMsg.DeleteMsg.CollectionID = collID
it.collectionID = collID

// If partitionName is not empty, partitionID will be set.
if len(it.upsertMsg.DeleteMsg.PartitionName) > 0 {
if it.partitionKeyMode {
// multiple entities with the same pk but different partition keys may be hashed to multiple physical partitions
// if deleteMsg.partitionID = common.InvalidPartitionID,
// all segments with this pk under the collection will have the delete record
it.upsertMsg.DeleteMsg.PartitionID = common.InvalidPartitionID
} else {
// partition name could be defaultPartitionName or a name specified by the sdk
partName := it.upsertMsg.DeleteMsg.PartitionName
if err := validatePartitionTag(partName, true); err != nil {
log.Info("Invalid partition name", zap.String("partitionName", partName), zap.Error(err))
@ -234,8 +252,6 @@ func (it *upsertTask) deletePreExecute(ctx context.Context) error {
return err
}
it.upsertMsg.DeleteMsg.PartitionID = partID
} else {
it.upsertMsg.DeleteMsg.PartitionID = common.InvalidPartitionID
}

it.upsertMsg.DeleteMsg.Timestamps = make([]uint64, it.upsertMsg.DeleteMsg.NumRows)
@ -249,7 +265,9 @@ func (it *upsertTask) deletePreExecute(ctx context.Context) error {
func (it *upsertTask) PreExecute(ctx context.Context) error {
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-Upsert-PreExecute")
defer sp.End()
log := log.Ctx(ctx).With(zap.String("collectionName", it.req.CollectionName))

collectionName := it.req.CollectionName
log := log.Ctx(ctx).With(zap.String("collectionName", collectionName))

it.result = &milvuspb.MutationResult{
Status: &commonpb.Status{
@ -261,13 +279,36 @@ func (it *upsertTask) PreExecute(ctx context.Context) error {
Timestamp: it.EndTs(),
}

schema, err := globalMetaCache.GetCollectionSchema(ctx, it.req.CollectionName)
schema, err := globalMetaCache.GetCollectionSchema(ctx, collectionName)
if err != nil {
log.Info("Failed to get collection schema", zap.Error(err))
log.Info("Failed to get collection schema",
zap.String("collectionName", collectionName),
zap.Error(err))
return err
}
it.schema = schema

it.partitionKeyMode, err = isPartitionKeyMode(ctx, collectionName)
if err != nil {
log.Warn("check partition key mode failed",
zap.String("collectionName", collectionName),
zap.Error(err))
return err
}
if it.partitionKeyMode {
if len(it.req.GetPartitionName()) > 0 {
return errors.New("not support manually specifying the partition names if partition key mode is used")
}
} else {
// set the default partition name if the partition key is not used
// insert into the _default partition
partitionTag := it.req.GetPartitionName()
if len(partitionTag) <= 0 {
partitionTag = Params.CommonCfg.DefaultPartitionName.GetValue()
it.req.PartitionName = partitionTag
}
}

it.upsertMsg = &msgstream.UpsertMsg{
InsertMsg: &msgstream.InsertMsg{
InsertRequest: msgpb.InsertRequest{
@ -332,19 +373,6 @@ func (it *upsertTask) insertExecute(ctx context.Context, msgPack *msgstream.MsgP
it.upsertMsg.InsertMsg.CollectionID = collID
log := log.Ctx(ctx).With(
zap.Int64("collectionID", collID))
var partitionID UniqueID
if len(it.upsertMsg.InsertMsg.PartitionName) > 0 {
partitionID, err = globalMetaCache.GetPartitionID(ctx, collectionName, it.req.PartitionName)
if err != nil {
return err
}
} else {
partitionID, err = globalMetaCache.GetPartitionID(ctx, collectionName, Params.CommonCfg.DefaultPartitionName.GetValue())
if err != nil {
return err
}
}
it.upsertMsg.InsertMsg.PartitionID = partitionID
getCacheDur := tr.RecordSpan()

_, err = it.chMgr.getOrCreateDmlStream(collID)
@ -365,14 +393,18 @@ func (it *upsertTask) insertExecute(ctx context.Context, msgPack *msgstream.MsgP
zap.String("collection", it.req.GetCollectionName()),
zap.String("partition", it.req.GetPartitionName()),
zap.Int64("collection_id", collID),
zap.Int64("partition_id", partitionID),
zap.Strings("virtual_channels", channelNames),
zap.Int64("task_id", it.ID()),
zap.Duration("get cache duration", getCacheDur),
zap.Duration("get msgStream duration", getMsgStreamDur))

// assign segmentID for insert data and repack data by segmentID
insertMsgPack, err := assignSegmentID(it.TraceCtx(), it.upsertMsg.InsertMsg, it.result, channelNames, it.idAllocator, it.segIDAssigner)
var insertMsgPack *msgstream.MsgPack
if it.partitionKeys == nil {
insertMsgPack, err = repackInsertData(it.TraceCtx(), channelNames, it.upsertMsg.InsertMsg, it.result, it.idAllocator, it.segIDAssigner)
} else {
insertMsgPack, err = repackInsertDataWithPartitionKey(it.TraceCtx(), channelNames, it.partitionKeys, it.upsertMsg.InsertMsg, it.result, it.idAllocator, it.segIDAssigner)
}
if err != nil {
log.Error("assign segmentID and repack insert data failed when insertExecute",
zap.Error(err))

@ -35,6 +35,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/planpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
typeutil2 "github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
@ -1033,7 +1034,7 @@ func checkPrimaryFieldData(schema *schemapb.CollectionSchema, result *milvuspb.M
if !primaryFieldSchema.AutoID {
primaryFieldData, err = typeutil.GetPrimaryFieldData(insertMsg.GetFieldsData(), primaryFieldSchema)
if err != nil {
log.Error("get primary field data failed", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err))
log.Info("get primary field data failed", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err))
return nil, err
}
} else {
@ -1044,7 +1045,7 @@ func checkPrimaryFieldData(schema *schemapb.CollectionSchema, result *milvuspb.M
// if autoID == true, only int64 primary fields are currently supported
primaryFieldData, err = autoGenPrimaryFieldData(primaryFieldSchema, insertMsg.GetRowIDs())
if err != nil {
log.Error("generate primary field data failed when autoID == true", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err))
log.Info("generate primary field data failed when autoID == true", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err))
return nil, err
}
// if autoID == true, set the primary field data
@ -1070,13 +1071,27 @@ func checkPrimaryFieldData(schema *schemapb.CollectionSchema, result *milvuspb.M
// parse primaryFieldData into result.IDs and use them as the returned primary keys
ids, err := parsePrimaryFieldData2IDs(primaryFieldData)
if err != nil {
log.Error("parse primary field data to IDs failed", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err))
log.Warn("parse primary field data to IDs failed", zap.String("collectionName", insertMsg.CollectionName), zap.Error(err))
return nil, err
}

return ids, nil
}

func getPartitionKeyFieldData(fieldSchema *schemapb.FieldSchema, insertMsg *msgstream.InsertMsg) (*schemapb.FieldData, error) {
if len(insertMsg.GetPartitionName()) > 0 {
return nil, errors.New("not support manually specifying the partition names if partition key mode is used")
}

for _, fieldData := range insertMsg.GetFieldsData() {
if fieldData.GetFieldId() == fieldSchema.GetFieldID() {
return fieldData, nil
}
}

return nil, errors.New("partition key not specify when insert")
}

func getCollectionProgress(
ctx context.Context,
queryCoord types.QueryCoord,
@ -1188,6 +1203,84 @@ func getPartitionProgress(
return
}

func isPartitionKeyMode(ctx context.Context, colName string) (bool, error) {
colSchema, err := globalMetaCache.GetCollectionSchema(ctx, colName)
if err != nil {
return false, err
}

for _, fieldSchema := range colSchema.GetFields() {
if fieldSchema.IsPartitionKey {
return true, nil
}
}

return false, nil
}

// getDefaultPartitionNames is only used in partition key mode
func getDefaultPartitionNames(ctx context.Context, collectionName string) ([]string, error) {
partitions, err := globalMetaCache.GetPartitions(ctx, collectionName)
if err != nil {
return nil, err
}

// Make sure the order of the partition names is the same every time
partitionNames := make([]string, len(partitions))
for partitionName := range partitions {
splits := strings.Split(partitionName, "_")
if len(splits) < 2 {
err = fmt.Errorf("bad default partition name in partition key mode: %s", partitionName)
return nil, err
}
index, err := strconv.ParseInt(splits[len(splits)-1], 10, 64)
if err != nil {
return nil, err
}

partitionNames[index] = partitionName
}

return partitionNames, nil
}
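
A quick standalone illustration of the ordering contract above (an editor's sketch, not part of this commit; the helper and main function are illustrative): partitions created in partition key mode are named <defaultPartitionName>_<index>, and getDefaultPartitionNames relies on the numeric suffix to return them in a stable order regardless of map iteration order.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sortByIndexSuffix mirrors the reordering above: "_default_2" goes to
// slot 2 no matter which order the names arrive in.
func sortByIndexSuffix(names []string) ([]string, error) {
	ordered := make([]string, len(names))
	for _, name := range names {
		splits := strings.Split(name, "_")
		index, err := strconv.ParseInt(splits[len(splits)-1], 10, 64)
		if err != nil {
			return nil, err
		}
		ordered[index] = name
	}
	return ordered, nil
}

func main() {
	out, _ := sortByIndexSuffix([]string{"_default_2", "_default_0", "_default_1"})
	fmt.Println(out) // [_default_0 _default_1 _default_2]
}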

func assignChannelsByPK(pks *schemapb.IDs, channelNames []string, insertMsg *msgstream.InsertMsg) map[string][]int {
insertMsg.HashValues = typeutil.HashPK2Channels(pks, channelNames)

// groupedHashKeys represents the dmChannel index
channel2RowOffsets := make(map[string][]int) // channelName to row offsets
// assert len(it.hashValues) < maxInt
for offset, channelID := range insertMsg.HashValues {
channelName := channelNames[channelID]
if _, ok := channel2RowOffsets[channelName]; !ok {
channel2RowOffsets[channelName] = []int{}
}
channel2RowOffsets[channelName] = append(channel2RowOffsets[channelName], offset)
}

return channel2RowOffsets
}
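
For intuition, the grouping step above reduces to the following self-contained sketch (inputs are assumed and illustrative): each row's hash value selects a channel, and row offsets are bucketed per channel name.

package main

import "fmt"

// groupByChannel buckets row offsets by the channel their hash points at,
// just as assignChannelsByPK does with insertMsg.HashValues.
func groupByChannel(hashValues []uint32, channelNames []string) map[string][]int {
	channel2RowOffsets := make(map[string][]int)
	for offset, channelID := range hashValues {
		name := channelNames[channelID]
		channel2RowOffsets[name] = append(channel2RowOffsets[name], offset)
	}
	return channel2RowOffsets
}

func main() {
	// rows 0 and 2 hash to channel 1, row 1 to channel 0
	fmt.Println(groupByChannel([]uint32{1, 0, 1}, []string{"ch0", "ch1"}))
	// map[ch0:[1] ch1:[0 2]]
}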

func assignPartitionKeys(ctx context.Context, collName string, keys []*planpb.GenericValue) ([]string, error) {
partitionNames, err := getDefaultPartitionNames(ctx, collName)
if err != nil {
return nil, err
}

schema, err := globalMetaCache.GetCollectionSchema(ctx, collName)
if err != nil {
return nil, err
}

partitionKeyFieldSchema, err := typeutil.GetPartitionKeyFieldSchema(schema)
if err != nil {
return nil, err
}

hashedPartitionNames, err := typeutil2.HashKey2Partitions(partitionKeyFieldSchema, keys, partitionNames)
return hashedPartitionNames, err
}

func memsetLoop[T any](v T, numRows int) []T {
ret := make([]T, 0, numRows)
for i := 0; i < numRows; i++ {

@ -50,8 +50,9 @@ type createCollectionTask struct {
Req *milvuspb.CreateCollectionRequest
schema *schemapb.CollectionSchema
collID UniqueID
partID UniqueID
partIDs []UniqueID
channels collectionChannels
partitionNames []string
}

func (t *createCollectionTask) validate() error {
@ -224,10 +225,46 @@ func (t *createCollectionTask) assignCollectionID() error {
return err
}

func (t *createCollectionTask) assignPartitionID() error {
var err error
t.partID, err = t.core.idAllocator.AllocOne()
func (t *createCollectionTask) assignPartitionIDs() error {
t.partitionNames = make([]string, 0)
defaultPartitionName := Params.CommonCfg.DefaultPartitionName.GetValue()

_, err := typeutil.GetPartitionKeyFieldSchema(t.schema)
if err == nil {
partitionNums := t.Req.GetNumPartitions()
// double check, default num of physical partitions should be greater than 0
if partitionNums <= 0 {
return errors.New("the specified partitions should be greater than 0 if partition key is used")
}

cfgMaxPartitionNum := Params.RootCoordCfg.MaxPartitionNum.GetAsInt64()
if partitionNums > cfgMaxPartitionNum {
return fmt.Errorf("partition number (%d) exceeds max configuration (%d), collection: %s",
partitionNums, cfgMaxPartitionNum, t.Req.CollectionName)
}

for i := int64(0); i < partitionNums; i++ {
t.partitionNames = append(t.partitionNames, fmt.Sprintf("%s_%d", defaultPartitionName, i))
}
} else {
// compatible with old versions <= 2.2.8
t.partitionNames = append(t.partitionNames, defaultPartitionName)
}

t.partIDs = make([]UniqueID, len(t.partitionNames))
start, end, err := t.core.idAllocator.Alloc(uint32(len(t.partitionNames)))
if err != nil {
return err
}

for i := start; i < end; i++ {
t.partIDs[i-start] = i
}
log.Info("assign partitions when create collection",
zap.String("collectionName", t.Req.GetCollectionName()),
zap.Strings("partitionNames", t.partitionNames))

return nil
}
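
The switch from AllocOne to Alloc above hands out one contiguous ID block for all partitions at once. A minimal stand-in for that contract (the alloc helper here is hypothetical; only the half-open [start, end) range semantics match the code above):

package main

import "fmt"

// alloc is a stand-in for idAllocator.Alloc: it returns n consecutive IDs
// as a half-open range [start, end).
func alloc(next *int64, n uint32) (start, end int64) {
	start = *next
	*next += int64(n)
	return start, *next
}

func main() {
	next := int64(1000)
	start, end := alloc(&next, 4)
	partIDs := make([]int64, end-start)
	for i := start; i < end; i++ {
		partIDs[i-start] = i
	}
	fmt.Println(partIDs) // [1000 1001 1002 1003]
}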

func (t *createCollectionTask) assignChannels() error {
@ -264,7 +301,7 @@ func (t *createCollectionTask) Prepare(ctx context.Context) error {
return err
}

if err := t.assignPartitionID(); err != nil {
if err := t.assignPartitionIDs(); err != nil {
return err
}

@ -274,7 +311,7 @@ func (t *createCollectionTask) Prepare(ctx context.Context) error {
func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context) *ms.MsgPack {
ts := t.GetTs()
collectionID := t.collID
partitionID := t.partID
partitionIDs := t.partIDs
// error won't happen here.
marshaledSchema, _ := proto.Marshal(t.schema)
pChannels := t.channels.physicalChannels
@ -295,7 +332,7 @@ func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context) *ms.M
commonpbutil.WithTimeStamp(ts),
),
CollectionID: collectionID,
PartitionID: partitionID,
PartitionIDs: partitionIDs,
Schema: marshaledSchema,
VirtualChannelNames: vChannels,
PhysicalChannelNames: pChannels,
@ -313,7 +350,7 @@ func (t *createCollectionTask) addChannelsAndGetStartPositions(ctx context.Conte

func (t *createCollectionTask) Execute(ctx context.Context) error {
collID := t.collID
partID := t.partID
partIDs := t.partIDs
ts := t.GetTs()

vchanNames := t.channels.virtualChannels
@ -326,6 +363,17 @@ func (t *createCollectionTask) Execute(ctx context.Context) error {
return err
}

partitions := make([]*model.Partition, len(partIDs))
for i, partID := range partIDs {
partitions[i] = &model.Partition{
PartitionID: partID,
PartitionName: t.partitionNames[i],
PartitionCreatedTimestamp: ts,
CollectionID: collID,
State: pb.PartitionState_PartitionCreated,
}
}

collInfo := model.Collection{
CollectionID: collID,
Name: t.schema.Name,
@ -339,15 +387,7 @@ func (t *createCollectionTask) Execute(ctx context.Context) error {
StartPositions: toKeyDataPairs(startPositions),
CreateTime: ts,
State: pb.CollectionState_CollectionCreating,
Partitions: []*model.Partition{
{
PartitionID: partID,
PartitionName: Params.CommonCfg.DefaultPartitionName.GetValue(),
PartitionCreatedTimestamp: ts,
CollectionID: collID,
State: pb.PartitionState_PartitionCreated,
},
},
Partitions: partitions,
Properties: t.Req.Properties,
EnableDynamicField: t.schema.EnableDynamicField,
}
@ -356,7 +396,6 @@ func (t *createCollectionTask) Execute(ctx context.Context) error {
// if add collection successfully due to idempotency check. Some steps may be risky to be duplicate executed if they
// are not promised idempotent.
clone := collInfo.Clone()
clone.Partitions = []*model.Partition{{PartitionName: Params.CommonCfg.DefaultPartitionName.GetValue()}}
// need double check in meta table if we can't promise the sequence execution.
existedCollInfo, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp)
if err == nil {

@ -489,7 +489,6 @@ func Test_createCollectionTask_Execute(t *testing.T) {
Fields: model.UnmarshalFieldModels(schema.GetFields()),
VirtualChannelNames: channels.virtualChannels,
PhysicalChannelNames: channels.physicalChannels,
Partitions: []*model.Partition{{PartitionName: Params.CommonCfg.DefaultPartitionName.GetValue()}},
}

meta := newMockMetaTable()
@ -738,3 +737,58 @@ func Test_createCollectionTask_Execute(t *testing.T) {
assert.Zero(t, len(ticker.listDmlChannels()))
})
}

func Test_createCollectionTask_PartitionKey(t *testing.T) {
defer cleanTestEnv()

collectionName := funcutil.GenRandomStr()
field1 := funcutil.GenRandomStr()
ticker := newRocksMqTtSynchronizer()

meta := mockrootcoord.NewIMetaTable(t)
core := newTestCore(withValidIDAllocator(), withTtSynchronizer(ticker), withMeta(meta))

partitionKeyField := &schemapb.FieldSchema{
Name: field1,
DataType: schemapb.DataType_Int64,
IsPartitionKey: true,
}

schema := &schemapb.CollectionSchema{
Name: collectionName,
Description: "",
AutoID: false,
Fields: []*schemapb.FieldSchema{partitionKeyField},
}
marshaledSchema, err := proto.Marshal(schema)
assert.NoError(t, err)

task := createCollectionTask{
baseTask: newBaseTask(context.TODO(), core),
Req: &milvuspb.CreateCollectionRequest{
Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
CollectionName: collectionName,
Schema: marshaledSchema,
ShardsNum: common.DefaultShardsNum,
},
}

t.Run("without num partition", func(t *testing.T) {
task.Req.NumPartitions = 0
err = task.Prepare(context.Background())
assert.Error(t, err)
})

t.Run("num partition too large", func(t *testing.T) {
task.Req.NumPartitions = Params.RootCoordCfg.MaxPartitionNum.GetAsInt64() + 1
err = task.Prepare(context.Background())
assert.Error(t, err)
})

task.Req.NumPartitions = common.DefaultPartitionsWithPartitionKey

t.Run("normal case", func(t *testing.T) {
err = task.Prepare(context.Background())
assert.NoError(t, err)
})
}

@ -51,6 +51,7 @@ import (
tso2 "github.com/milvus-io/milvus/internal/tso"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/importutil"
"github.com/milvus-io/milvus/internal/util/sessionutil"
tsoutil2 "github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/pkg/common"
@ -927,6 +928,7 @@ func convertModelToDesc(collInfo *model.Collection, aliases []string) *milvuspb.
resp.StartPositions = collInfo.StartPositions
resp.CollectionName = resp.Schema.Name
resp.Properties = collInfo.Properties
resp.NumPartitions = int64(len(collInfo.Partitions))
return resp
}

@ -1677,6 +1679,37 @@ func (c *Core) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvus
zap.Error(err))
return nil, err
}

isBackUp := importutil.IsBackup(req.GetOptions())
if isBackUp {
if len(req.GetPartitionName()) == 0 {
log.Info("partition name not specified when backup recovery",
zap.String("collection name", req.GetCollectionName()))
ret := &milvuspb.ImportResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError,
"partition name not specified when backup"),
}
return ret, nil
}
} else {
// In v2.2.9, bulkinsert cannot support partition key, return error to client.
// Remove the following lines after bulkinsert can support partition key
for _, field := range colInfo.Fields {
if field.IsPartitionKey {
log.Info("partition key is not yet supported by bulkinsert",
zap.String("collection name", req.GetCollectionName()),
zap.String("partition key", field.Name))
ret := &milvuspb.ImportResponse{
Status: failStatus(commonpb.ErrorCode_UnexpectedError,
fmt.Sprintf("the collection '%s' contains partition key '%s', partition key is not yet supported by bulkinsert",
req.GetCollectionName(), field.Name)),
}
return ret, nil
}
}
// Remove the lines above after bulkinsert can support partition key
}

cID := colInfo.CollectionID
req.ChannelNames = c.meta.GetCollectionVirtualChannels(cID)
if req.GetPartitionName() == "" {

@ -41,6 +41,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/importutil"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/pkg/util/etcd"
"github.com/milvus-io/milvus/pkg/util/funcutil"
@ -950,6 +951,48 @@ func TestCore_Import(t *testing.T) {
})
assert.NoError(t, err)
})

t.Run("backup without partition name", func(t *testing.T) {
ctx := context.Background()
c := newTestCore(withHealthyCode(),
withMeta(meta))

coll := &model.Collection{
Name: "a-good-name",
}
meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
return coll.Clone(), nil
}
resp, _ := c.Import(ctx, &milvuspb.ImportRequest{
CollectionName: "a-good-name",
Options: []*commonpb.KeyValuePair{
{Key: importutil.BackupFlag, Value: "true"},
},
})
assert.NotNil(t, resp)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
})

// Remove the following case after bulkinsert can support partition key
t.Run("unsupport partition key", func(t *testing.T) {
ctx := context.Background()
c := newTestCore(withHealthyCode(),
withMeta(meta))

coll := &model.Collection{
Name: "a-good-name",
Fields: []*model.Field{
{IsPartitionKey: true},
},
}
meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
return coll.Clone(), nil
}
resp, _ := c.Import(ctx, &milvuspb.ImportRequest{
CollectionName: "a-good-name",
})
assert.NotNil(t, resp)
})
}

func TestCore_GetImportState(t *testing.T) {

@ -40,6 +40,10 @@ func (bm *MockMsg) ID() msgstream.UniqueID {
return 0
}

func (bm *MockMsg) SetID(id msgstream.UniqueID) {
// do nothing
}

func (bm *MockMsg) BeginTs() Timestamp {
return 0
}

47 internal/util/typeutil/hash.go Normal file
@ -0,0 +1,47 @@
package typeutil

import (
"github.com/cockroachdb/errors"

"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/internal/proto/planpb"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)

// HashKey2Partitions hashes partition keys to partitions
func HashKey2Partitions(fieldSchema *schemapb.FieldSchema, keys []*planpb.GenericValue, partitionNames []string) ([]string, error) {
selectedPartitions := make(map[string]struct{})
numPartitions := uint32(len(partitionNames))
switch fieldSchema.GetDataType() {
case schemapb.DataType_Int64:
for _, key := range keys {
if int64Val, ok := key.GetVal().(*planpb.GenericValue_Int64Val); ok {
value, _ := typeutil.Hash32Int64(int64Val.Int64Val)
partitionName := partitionNames[value%numPartitions]
selectedPartitions[partitionName] = struct{}{}
} else {
return nil, errors.New("the data type of the data and the schema do not match")
}
}
case schemapb.DataType_VarChar:
for _, key := range keys {
if stringVal, ok := key.GetVal().(*planpb.GenericValue_StringVal); ok {
value := typeutil.HashString2Uint32(stringVal.StringVal)
partitionName := partitionNames[value%numPartitions]
selectedPartitions[partitionName] = struct{}{}
} else {
return nil, errors.New("the data type of the data and the schema do not match")
}
}
default:
return nil, errors.New("currently only support DataType Int64 or VarChar as partition keys")
}

result := make([]string, 0)
for partitionName := range selectedPartitions {
result = append(result, partitionName)
}

return result, nil
}
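
To see the pruning effect end to end, here is a freestanding sketch from the editor (it approximates the Hash32Int64 helper as a murmur3 32-bit hash over the key's little-endian bytes; partition and key values are illustrative) of how an `in [...]` filter on an Int64 partition key selects only a subset of partitions:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/spaolacci/murmur3"
)

// hash32Int64 approximates typeutil.Hash32Int64: murmur3 over the key's
// 8 little-endian bytes.
func hash32Int64(v int64) uint32 {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(v))
	return murmur3.Sum32(b)
}

func main() {
	partitionNames := []string{"_default_0", "_default_1", "_default_2", "_default_3"}
	keys := []int64{1, 2, 3}

	selected := make(map[string]struct{})
	for _, k := range keys {
		name := partitionNames[hash32Int64(k)%uint32(len(partitionNames))]
		selected[name] = struct{}{}
	}
	// at most len(keys) partitions need to be searched
	fmt.Println(len(selected))
}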
@ -47,6 +47,9 @@ const (
// DefaultShardsNum defines the default number of shards when creating a collection
DefaultShardsNum = int32(1)

// DefaultPartitionsWithPartitionKey defines the default number of partitions when the partition key is used
DefaultPartitionsWithPartitionKey = int64(64)

// InvalidPartitionID indicates that the partition is not specified. It will be set when the partitionName is empty
InvalidPartitionID = int64(-1)


@ -1071,6 +1071,10 @@ func (t *MarshalFailTsMsg) ID() UniqueID {
return 0
}

func (t *MarshalFailTsMsg) SetID(id UniqueID) {
// do nothing
}

func (t *MarshalFailTsMsg) Type() MsgType {
return commonpb.MsgType_Undefined
}

@ -42,6 +42,7 @@ type TsMsg interface {
TraceCtx() context.Context
SetTraceCtx(ctx context.Context)
ID() UniqueID
SetID(id UniqueID)
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
@ -122,6 +123,11 @@ func (it *InsertMsg) ID() UniqueID {
return it.Base.MsgID
}

// SetID sets the ID of this message pack
func (it *InsertMsg) SetID(id UniqueID) {
it.Base.MsgID = id
}

// Type returns the type of this message pack
func (it *InsertMsg) Type() MsgType {
return it.Base.MsgType
@ -299,6 +305,11 @@ func (dt *DeleteMsg) ID() UniqueID {
return dt.Base.MsgID
}

// SetID sets the ID of this message pack
func (dt *DeleteMsg) SetID(id UniqueID) {
dt.Base.MsgID = id
}

// Type returns the type of this message pack
func (dt *DeleteMsg) Type() MsgType {
return dt.Base.MsgType
@ -399,6 +410,11 @@ func (tst *TimeTickMsg) ID() UniqueID {
return tst.Base.MsgID
}

// SetID sets the ID of this message pack
func (tst *TimeTickMsg) SetID(id UniqueID) {
tst.Base.MsgID = id
}

// Type returns the type of this message pack
func (tst *TimeTickMsg) Type() MsgType {
return tst.Base.MsgType
@ -454,6 +470,11 @@ func (cc *CreateCollectionMsg) ID() UniqueID {
return cc.Base.MsgID
}

// SetID sets the ID of this message pack
func (cc *CreateCollectionMsg) SetID(id UniqueID) {
cc.Base.MsgID = id
}

// Type returns the type of this message pack
func (cc *CreateCollectionMsg) Type() MsgType {
return cc.Base.MsgType
@ -509,6 +530,11 @@ func (dc *DropCollectionMsg) ID() UniqueID {
return dc.Base.MsgID
}

// SetID sets the ID of this message pack
func (dc *DropCollectionMsg) SetID(id UniqueID) {
dc.Base.MsgID = id
}

// Type returns the type of this message pack
func (dc *DropCollectionMsg) Type() MsgType {
return dc.Base.MsgType
@ -564,6 +590,11 @@ func (cp *CreatePartitionMsg) ID() UniqueID {
return cp.Base.MsgID
}

// SetID sets the ID of this message pack
func (cp *CreatePartitionMsg) SetID(id UniqueID) {
cp.Base.MsgID = id
}

// Type returns the type of this message pack
func (cp *CreatePartitionMsg) Type() MsgType {
return cp.Base.MsgType
@ -619,6 +650,11 @@ func (dp *DropPartitionMsg) ID() UniqueID {
return dp.Base.MsgID
}

// SetID sets the ID of this message pack
func (dp *DropPartitionMsg) SetID(id UniqueID) {
dp.Base.MsgID = id
}

// Type returns the type of this message pack
func (dp *DropPartitionMsg) Type() MsgType {
return dp.Base.MsgType
@ -674,6 +710,11 @@ func (m *DataNodeTtMsg) ID() UniqueID {
return m.Base.MsgID
}

// SetID sets the ID of this message pack
func (m *DataNodeTtMsg) SetID(id UniqueID) {
m.Base.MsgID = id
}

// Type returns the type of this message pack
func (m *DataNodeTtMsg) Type() MsgType {
return m.Base.MsgType

@ -20,9 +20,10 @@ import (
"hash/crc32"
"unsafe"

"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/cockroachdb/errors"
"github.com/spaolacci/murmur3"

"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/pkg/common"
)

@ -94,3 +95,34 @@ func HashPK2Channels(primaryKeys *schemapb.IDs, shardNames []string) []uint32 {

return hashValues
}

// HashKey2Partitions hashes partition keys to partitions
func HashKey2Partitions(keys *schemapb.FieldData, partitionNames []string) ([]uint32, error) {
var hashValues []uint32
numPartitions := uint32(len(partitionNames))
switch keys.Field.(type) {
case *schemapb.FieldData_Scalars:
scalarField := keys.GetScalars()
switch scalarField.Data.(type) {
case *schemapb.ScalarField_LongData:
longKeys := scalarField.GetLongData().Data
for _, key := range longKeys {
value, _ := Hash32Int64(key)
hashValues = append(hashValues, value%numPartitions)
}
case *schemapb.ScalarField_StringData:
stringKeys := scalarField.GetStringData().Data
for _, key := range stringKeys {
value := HashString2Uint32(key)
hashValues = append(hashValues, value%numPartitions)
}
default:
return nil, errors.New("currently only support DataType Int64 or VarChar as partition key Field")
}
default:
return nil, errors.New("currently not support vector field as partition keys")
}

return hashValues, nil
}

@ -22,9 +22,9 @@ import (
"strconv"

"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus-proto/go-api/schemapb"
"go.uber.org/zap"

"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
)
@ -192,6 +192,7 @@ type SchemaHelper struct {
nameOffset map[string]int
idOffset map[int64]int
primaryKeyOffset int
partitionKeyOffset int
}

// CreateSchemaHelper returns a new SchemaHelper object
@ -199,7 +200,7 @@ func CreateSchemaHelper(schema *schemapb.CollectionSchema) (*SchemaHelper, error
if schema == nil {
return nil, errors.New("schema is nil")
}
schemaHelper := SchemaHelper{schema: schema, nameOffset: make(map[string]int), idOffset: make(map[int64]int), primaryKeyOffset: -1}
schemaHelper := SchemaHelper{schema: schema, nameOffset: make(map[string]int), idOffset: make(map[int64]int), primaryKeyOffset: -1, partitionKeyOffset: -1}
for offset, field := range schema.Fields {
if _, ok := schemaHelper.nameOffset[field.Name]; ok {
return nil, fmt.Errorf("duplicated fieldName: %s", field.Name)
@ -215,6 +216,13 @@ func CreateSchemaHelper(schema *schemapb.CollectionSchema) (*SchemaHelper, error
}
schemaHelper.primaryKeyOffset = offset
}

if field.IsPartitionKey {
if schemaHelper.partitionKeyOffset != -1 {
return nil, errors.New("partition key is not unique")
}
schemaHelper.partitionKeyOffset = offset
}
}
return &schemaHelper, nil
}
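
The uniqueness rule enforced above boils down to a single scan over the fields; a standalone sketch of it (editor's illustration, with FieldSchema trimmed to the one flag that matters here):

package main

import (
	"errors"
	"fmt"
)

type fieldSchema struct {
	Name           string
	IsPartitionKey bool
}

// partitionKeyOffset returns the index of the single partition key field,
// or an error when none, or more than one, is flagged.
func partitionKeyOffset(fields []fieldSchema) (int, error) {
	offset := -1
	for i, f := range fields {
		if f.IsPartitionKey {
			if offset != -1 {
				return -1, errors.New("partition key is not unique")
			}
			offset = i
		}
	}
	if offset == -1 {
		return -1, errors.New("no partition key in schema")
	}
	return offset, nil
}

func main() {
	_, err := partitionKeyOffset([]fieldSchema{
		{Name: "a", IsPartitionKey: true},
		{Name: "b", IsPartitionKey: true},
	})
	fmt.Println(err) // partition key is not unique
}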
@ -227,6 +235,14 @@ func (helper *SchemaHelper) GetPrimaryKeyField() (*schemapb.FieldSchema, error)
return helper.schema.Fields[helper.primaryKeyOffset], nil
}

// GetPartitionKeyField returns the schema of the partition key
func (helper *SchemaHelper) GetPartitionKeyField() (*schemapb.FieldSchema, error) {
if helper.partitionKeyOffset == -1 {
return nil, fmt.Errorf("failed to get partition key field: no partition key in schema")
}
return helper.schema.Fields[helper.partitionKeyOffset], nil
}

// GetFieldFromName is used to find the schema by field name
func (helper *SchemaHelper) GetFieldFromName(fieldName string) (*schemapb.FieldSchema, error) {
offset, ok := helper.nameOffset[fieldName]
@ -710,6 +726,17 @@ func GetPrimaryFieldSchema(schema *schemapb.CollectionSchema) (*schemapb.FieldSc
return nil, errors.New("primary field is not found")
}

// GetPartitionKeyFieldSchema gets the partition key field schema from the collection schema
func GetPartitionKeyFieldSchema(schema *schemapb.CollectionSchema) (*schemapb.FieldSchema, error) {
for _, fieldSchema := range schema.Fields {
if fieldSchema.IsPartitionKey {
return fieldSchema, nil
}
}

return nil, errors.New("partition key field is not found")
}

// GetPrimaryFieldData gets the primary field data from all field data inserted from the SDK
func GetPrimaryFieldData(datas []*schemapb.FieldData, primaryFieldSchema *schemapb.FieldSchema) (*schemapb.FieldData, error) {
primaryFieldID := primaryFieldSchema.FieldID

@ -646,7 +646,7 @@ class TestCollectionParams(TestcaseBase):
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
error = {ct.err_code: 0, ct.err_msg: "Expected only one primary key field"}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
check_task=CheckTasks.err_res, check_items=error)