refine index node (#18545)

Signed-off-by: Zach41 <zongmei.zhang@zilliz.com>
This commit is contained in:
Zach 2022-08-09 15:38:50 +08:00 committed by GitHub
parent 4edc8d3f81
commit d3c478f03c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 3244 additions and 2250 deletions

View File

@ -388,74 +388,74 @@ const char descriptor_table_protodef_common_2eproto[] PROTOBUF_SECTION_VARIABLE(
"foCacheFailure\020+\022\025\n\021ListPolicyFailure\020,\022"
"\022\n\016NotShardLeader\020-\022\026\n\022NoReplicaAvailabl"
"e\020.\022\023\n\017SegmentNotFound\020/\022\022\n\rDDRequestRac"
"e\020\350\007*X\n\nIndexState\022\022\n\016IndexStateNone\020\000\022\014"
"e\020\350\007*g\n\nIndexState\022\022\n\016IndexStateNone\020\000\022\014"
"\n\010Unissued\020\001\022\016\n\nInProgress\020\002\022\014\n\010Finished"
"\020\003\022\n\n\006Failed\020\004*\202\001\n\014SegmentState\022\024\n\020Segme"
"ntStateNone\020\000\022\014\n\010NotExist\020\001\022\013\n\007Growing\020\002"
"\022\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004\022\014\n\010Flushing\020\005\022"
"\013\n\007Dropped\020\006\022\r\n\tImporting\020\007*>\n\017Placehold"
"erType\022\010\n\004None\020\000\022\020\n\014BinaryVector\020d\022\017\n\013Fl"
"oatVector\020e*\266\014\n\007MsgType\022\r\n\tUndefined\020\000\022\024"
"\n\020CreateCollection\020d\022\022\n\016DropCollection\020e"
"\022\021\n\rHasCollection\020f\022\026\n\022DescribeCollectio"
"n\020g\022\023\n\017ShowCollections\020h\022\024\n\020GetSystemCon"
"figs\020i\022\022\n\016LoadCollection\020j\022\025\n\021ReleaseCol"
"lection\020k\022\017\n\013CreateAlias\020l\022\r\n\tDropAlias\020"
"m\022\016\n\nAlterAlias\020n\022\024\n\017CreatePartition\020\310\001\022"
"\022\n\rDropPartition\020\311\001\022\021\n\014HasPartition\020\312\001\022\026"
"\n\021DescribePartition\020\313\001\022\023\n\016ShowPartitions"
"\020\314\001\022\023\n\016LoadPartitions\020\315\001\022\026\n\021ReleaseParti"
"tions\020\316\001\022\021\n\014ShowSegments\020\372\001\022\024\n\017DescribeS"
"egment\020\373\001\022\021\n\014LoadSegments\020\374\001\022\024\n\017ReleaseS"
"egments\020\375\001\022\024\n\017HandoffSegments\020\376\001\022\030\n\023Load"
"BalanceSegments\020\377\001\022\025\n\020DescribeSegments\020\200"
"\002\022\020\n\013CreateIndex\020\254\002\022\022\n\rDescribeIndex\020\255\002\022"
"\016\n\tDropIndex\020\256\002\022\013\n\006Insert\020\220\003\022\013\n\006Delete\020\221"
"\003\022\n\n\005Flush\020\222\003\022\027\n\022ResendSegmentStats\020\223\003\022\013"
"\n\006Search\020\364\003\022\021\n\014SearchResult\020\365\003\022\022\n\rGetInd"
"exState\020\366\003\022\032\n\025GetIndexBuildProgress\020\367\003\022\034"
"\n\027GetCollectionStatistics\020\370\003\022\033\n\026GetParti"
"tionStatistics\020\371\003\022\r\n\010Retrieve\020\372\003\022\023\n\016Retr"
"ieveResult\020\373\003\022\024\n\017WatchDmChannels\020\374\003\022\025\n\020R"
"emoveDmChannels\020\375\003\022\027\n\022WatchQueryChannels"
"\020\376\003\022\030\n\023RemoveQueryChannels\020\377\003\022\035\n\030SealedS"
"egmentsChangeInfo\020\200\004\022\027\n\022WatchDeltaChanne"
"ls\020\201\004\022\024\n\017GetShardLeaders\020\202\004\022\020\n\013GetReplic"
"as\020\203\004\022\020\n\013SegmentInfo\020\330\004\022\017\n\nSystemInfo\020\331\004"
"\022\024\n\017GetRecoveryInfo\020\332\004\022\024\n\017GetSegmentStat"
"e\020\333\004\022\r\n\010TimeTick\020\260\t\022\023\n\016QueryNodeStats\020\261\t"
"\022\016\n\tLoadIndex\020\262\t\022\016\n\tRequestID\020\263\t\022\017\n\nRequ"
"estTSO\020\264\t\022\024\n\017AllocateSegment\020\265\t\022\026\n\021Segme"
"ntStatistics\020\266\t\022\025\n\020SegmentFlushDone\020\267\t\022\017"
"\n\nDataNodeTt\020\270\t\022\025\n\020CreateCredential\020\334\013\022\022"
"\n\rGetCredential\020\335\013\022\025\n\020DeleteCredential\020\336"
"\013\022\025\n\020UpdateCredential\020\337\013\022\026\n\021ListCredUser"
"names\020\340\013\022\017\n\nCreateRole\020\300\014\022\r\n\010DropRole\020\301\014"
"\022\024\n\017OperateUserRole\020\302\014\022\017\n\nSelectRole\020\303\014\022"
"\017\n\nSelectUser\020\304\014\022\023\n\016SelectResource\020\305\014\022\025\n"
"\020OperatePrivilege\020\306\014\022\020\n\013SelectGrant\020\307\014\022\033"
"\n\026RefreshPolicyInfoCache\020\310\014\022\017\n\nListPolic"
"y\020\311\014*\"\n\007DslType\022\007\n\003Dsl\020\000\022\016\n\nBoolExprV1\020\001"
"*B\n\017CompactionState\022\021\n\rUndefiedState\020\000\022\r"
"\n\tExecuting\020\001\022\r\n\tCompleted\020\002*X\n\020Consiste"
"ncyLevel\022\n\n\006Strong\020\000\022\013\n\007Session\020\001\022\013\n\007Bou"
"nded\020\002\022\016\n\nEventually\020\003\022\016\n\nCustomized\020\004*\257"
"\001\n\013ImportState\022\021\n\rImportPending\020\000\022\020\n\014Imp"
"ortFailed\020\001\022\021\n\rImportStarted\020\002\022\024\n\020Import"
"Downloaded\020\003\022\020\n\014ImportParsed\020\004\022\023\n\017Import"
"Persisted\020\005\022\023\n\017ImportCompleted\020\006\022\026\n\022Impo"
"rtAllocSegment\020\n*\036\n\014ResourceType\022\016\n\nColl"
"ection\020\000*\335\001\n\021ResourcePrivilege\022\020\n\014Privil"
"egeAll\020\000\022\023\n\017PrivilegeCreate\020\001\022\021\n\rPrivile"
"geDrop\020\002\022\022\n\016PrivilegeAlter\020\003\022\021\n\rPrivileg"
"eRead\020\004\022\021\n\rPrivilegeLoad\020\005\022\024\n\020PrivilegeR"
"elease\020\006\022\024\n\020PrivilegeCompact\020\007\022\023\n\017Privil"
"egeInsert\020\010\022\023\n\017PrivilegeDelete\020\t:^\n\021priv"
"ilege_ext_obj\022\037.google.protobuf.MessageO"
"ptions\030\351\007 \001(\0132!.milvus.proto.common.Priv"
"ilegeExtBW\n\016io.milvus.grpcB\013CommonProtoP"
"\001Z3github.com/milvus-io/milvus/internal/"
"proto/commonpb\240\001\001b\006proto3"
"\020\003\022\n\n\006Failed\020\004\022\r\n\tAbandoned\020\005*\202\001\n\014Segmen"
"tState\022\024\n\020SegmentStateNone\020\000\022\014\n\010NotExist"
"\020\001\022\013\n\007Growing\020\002\022\n\n\006Sealed\020\003\022\013\n\007Flushed\020\004"
"\022\014\n\010Flushing\020\005\022\013\n\007Dropped\020\006\022\r\n\tImporting"
"\020\007*>\n\017PlaceholderType\022\010\n\004None\020\000\022\020\n\014Binar"
"yVector\020d\022\017\n\013FloatVector\020e*\266\014\n\007MsgType\022\r"
"\n\tUndefined\020\000\022\024\n\020CreateCollection\020d\022\022\n\016D"
"ropCollection\020e\022\021\n\rHasCollection\020f\022\026\n\022De"
"scribeCollection\020g\022\023\n\017ShowCollections\020h\022"
"\024\n\020GetSystemConfigs\020i\022\022\n\016LoadCollection\020"
"j\022\025\n\021ReleaseCollection\020k\022\017\n\013CreateAlias\020"
"l\022\r\n\tDropAlias\020m\022\016\n\nAlterAlias\020n\022\024\n\017Crea"
"tePartition\020\310\001\022\022\n\rDropPartition\020\311\001\022\021\n\014Ha"
"sPartition\020\312\001\022\026\n\021DescribePartition\020\313\001\022\023\n"
"\016ShowPartitions\020\314\001\022\023\n\016LoadPartitions\020\315\001\022"
"\026\n\021ReleasePartitions\020\316\001\022\021\n\014ShowSegments\020"
"\372\001\022\024\n\017DescribeSegment\020\373\001\022\021\n\014LoadSegments"
"\020\374\001\022\024\n\017ReleaseSegments\020\375\001\022\024\n\017HandoffSegm"
"ents\020\376\001\022\030\n\023LoadBalanceSegments\020\377\001\022\025\n\020Des"
"cribeSegments\020\200\002\022\020\n\013CreateIndex\020\254\002\022\022\n\rDe"
"scribeIndex\020\255\002\022\016\n\tDropIndex\020\256\002\022\013\n\006Insert"
"\020\220\003\022\013\n\006Delete\020\221\003\022\n\n\005Flush\020\222\003\022\027\n\022ResendSe"
"gmentStats\020\223\003\022\013\n\006Search\020\364\003\022\021\n\014SearchResu"
"lt\020\365\003\022\022\n\rGetIndexState\020\366\003\022\032\n\025GetIndexBui"
"ldProgress\020\367\003\022\034\n\027GetCollectionStatistics"
"\020\370\003\022\033\n\026GetPartitionStatistics\020\371\003\022\r\n\010Retr"
"ieve\020\372\003\022\023\n\016RetrieveResult\020\373\003\022\024\n\017WatchDmC"
"hannels\020\374\003\022\025\n\020RemoveDmChannels\020\375\003\022\027\n\022Wat"
"chQueryChannels\020\376\003\022\030\n\023RemoveQueryChannel"
"s\020\377\003\022\035\n\030SealedSegmentsChangeInfo\020\200\004\022\027\n\022W"
"atchDeltaChannels\020\201\004\022\024\n\017GetShardLeaders\020"
"\202\004\022\020\n\013GetReplicas\020\203\004\022\020\n\013SegmentInfo\020\330\004\022\017"
"\n\nSystemInfo\020\331\004\022\024\n\017GetRecoveryInfo\020\332\004\022\024\n"
"\017GetSegmentState\020\333\004\022\r\n\010TimeTick\020\260\t\022\023\n\016Qu"
"eryNodeStats\020\261\t\022\016\n\tLoadIndex\020\262\t\022\016\n\tReque"
"stID\020\263\t\022\017\n\nRequestTSO\020\264\t\022\024\n\017AllocateSegm"
"ent\020\265\t\022\026\n\021SegmentStatistics\020\266\t\022\025\n\020Segmen"
"tFlushDone\020\267\t\022\017\n\nDataNodeTt\020\270\t\022\025\n\020Create"
"Credential\020\334\013\022\022\n\rGetCredential\020\335\013\022\025\n\020Del"
"eteCredential\020\336\013\022\025\n\020UpdateCredential\020\337\013\022"
"\026\n\021ListCredUsernames\020\340\013\022\017\n\nCreateRole\020\300\014"
"\022\r\n\010DropRole\020\301\014\022\024\n\017OperateUserRole\020\302\014\022\017\n"
"\nSelectRole\020\303\014\022\017\n\nSelectUser\020\304\014\022\023\n\016Selec"
"tResource\020\305\014\022\025\n\020OperatePrivilege\020\306\014\022\020\n\013S"
"electGrant\020\307\014\022\033\n\026RefreshPolicyInfoCache\020"
"\310\014\022\017\n\nListPolicy\020\311\014*\"\n\007DslType\022\007\n\003Dsl\020\000\022"
"\016\n\nBoolExprV1\020\001*B\n\017CompactionState\022\021\n\rUn"
"defiedState\020\000\022\r\n\tExecuting\020\001\022\r\n\tComplete"
"d\020\002*X\n\020ConsistencyLevel\022\n\n\006Strong\020\000\022\013\n\007S"
"ession\020\001\022\013\n\007Bounded\020\002\022\016\n\nEventually\020\003\022\016\n"
"\nCustomized\020\004*\257\001\n\013ImportState\022\021\n\rImportP"
"ending\020\000\022\020\n\014ImportFailed\020\001\022\021\n\rImportStar"
"ted\020\002\022\024\n\020ImportDownloaded\020\003\022\020\n\014ImportPar"
"sed\020\004\022\023\n\017ImportPersisted\020\005\022\023\n\017ImportComp"
"leted\020\006\022\026\n\022ImportAllocSegment\020\n*\036\n\014Resou"
"rceType\022\016\n\nCollection\020\000*\335\001\n\021ResourcePriv"
"ilege\022\020\n\014PrivilegeAll\020\000\022\023\n\017PrivilegeCrea"
"te\020\001\022\021\n\rPrivilegeDrop\020\002\022\022\n\016PrivilegeAlte"
"r\020\003\022\021\n\rPrivilegeRead\020\004\022\021\n\rPrivilegeLoad\020"
"\005\022\024\n\020PrivilegeRelease\020\006\022\024\n\020PrivilegeComp"
"act\020\007\022\023\n\017PrivilegeInsert\020\010\022\023\n\017PrivilegeD"
"elete\020\t:^\n\021privilege_ext_obj\022\037.google.pr"
"otobuf.MessageOptions\030\351\007 \001(\0132!.milvus.pr"
"oto.common.PrivilegeExtBW\n\016io.milvus.grp"
"cB\013CommonProtoP\001Z3github.com/milvus-io/m"
"ilvus/internal/proto/commonpb\240\001\001b\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_common_2eproto_deps[1] = {
&::descriptor_table_google_2fprotobuf_2fdescriptor_2eproto,
@ -476,7 +476,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_com
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_common_2eproto_once;
static bool descriptor_table_common_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_common_2eproto = {
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 4705,
&descriptor_table_common_2eproto_initialized, descriptor_table_protodef_common_2eproto, "common.proto", 4720,
&descriptor_table_common_2eproto_once, descriptor_table_common_2eproto_sccs, descriptor_table_common_2eproto_deps, 11, 1,
schemas, file_default_instances, TableStruct_common_2eproto::offsets,
file_level_metadata_common_2eproto, 11, file_level_enum_descriptors_common_2eproto, file_level_service_descriptors_common_2eproto,
@ -558,6 +558,7 @@ bool IndexState_IsValid(int value) {
case 2:
case 3:
case 4:
case 5:
return true;
default:
return false;

View File

@ -189,12 +189,13 @@ enum IndexState : int {
InProgress = 2,
Finished = 3,
Failed = 4,
Abandoned = 5,
IndexState_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::PROTOBUF_NAMESPACE_ID::int32>::min(),
IndexState_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::PROTOBUF_NAMESPACE_ID::int32>::max()
};
bool IndexState_IsValid(int value);
constexpr IndexState IndexState_MIN = IndexStateNone;
constexpr IndexState IndexState_MAX = Failed;
constexpr IndexState IndexState_MAX = Abandoned;
constexpr int IndexState_ARRAYSIZE = IndexState_MAX + 1;
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* IndexState_descriptor();

View File

@ -35,6 +35,7 @@ import (
"github.com/milvus-io/milvus/internal/indexnode"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexnodepb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
@ -232,13 +233,24 @@ func (s *Server) GetStatisticsChannel(ctx context.Context, req *internalpb.GetSt
return s.indexnode.GetStatisticsChannel(ctx)
}
// CreateIndex sends the create index request to IndexNode.
func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexRequest) (*commonpb.Status, error) {
return s.indexnode.CreateIndex(ctx, req)
// CreateJob sends the create index request to IndexNode.
func (s *Server) CreateJob(ctx context.Context, req *indexnodepb.CreateJobRequest) (*commonpb.Status, error) {
return s.indexnode.CreateJob(ctx, req)
}
func (s *Server) GetTaskSlots(ctx context.Context, req *indexpb.GetTaskSlotsRequest) (*indexpb.GetTaskSlotsResponse, error) {
return s.indexnode.GetTaskSlots(ctx, req)
// QueryJobs queries the statuses of index build jobs
func (s *Server) QueryJobs(ctx context.Context, req *indexnodepb.QueryJobsRequest) (*indexnodepb.QueryJobsRespond, error) {
return s.indexnode.QueryJobs(ctx, req)
}
// DropJobs drops index build jobs
func (s *Server) DropJobs(ctx context.Context, req *indexnodepb.DropJobsRequest) (*commonpb.Status, error) {
return s.indexnode.DropJobs(ctx, req)
}
// GetJobStats gets the IndexNode's job statistics
func (s *Server) GetJobStats(ctx context.Context, req *indexnodepb.GetJobStatsRequest) (*indexnodepb.GetJobStatsRespond, error) {
return s.indexnode.GetJobStats(ctx, req)
}
// GetMetrics gets the metrics info of IndexNode.

View File

@ -0,0 +1,39 @@
package indexnode
import (
"context"
"fmt"
"sync"
"github.com/milvus-io/milvus/internal/storage"
)
// StorageFactory creates (or returns cached) ChunkManager instances for a
// given (bucket, access key) pair.
type StorageFactory interface {
	NewChunkManager(ctx context.Context, bucket, storageAccessKey string) (storage.ChunkManager, error)
}
// chunkMgr is the default StorageFactory implementation. It caches one
// ChunkManager per (bucket, accessKey) pair in a sync.Map so concurrent
// callers share a single client instance.
type chunkMgr struct {
	cached sync.Map
}
// NewChunkManager returns a ChunkManager for the given bucket and access key,
// creating and caching one on first use. When several goroutines race on the
// same key, they all end up sharing whichever instance wins LoadOrStore.
func (m *chunkMgr) NewChunkManager(ctx context.Context, bucket, storageAccessKey string) (storage.ChunkManager, error) {
	cacheKey := m.cacheKey(bucket, storageAccessKey)
	if cached, hit := m.cached.Load(cacheKey); hit {
		return cached.(storage.ChunkManager), nil
	}
	// Cache miss: build a fresh manager through the storage factory.
	factory := storage.NewChunkManagerFactory("local", "minio",
		storage.AccessKeyID(storageAccessKey),
		storage.BucketName(bucket))
	mgr, err := factory.NewVectorStorageChunkManager(ctx)
	if err != nil {
		return nil, err
	}
	// Another goroutine may have stored first; return whichever landed.
	winner, _ := m.cached.LoadOrStore(cacheKey, mgr)
	return winner.(storage.ChunkManager), nil
}
// cacheKey derives the cache-map key for a (bucket, accessKey) pair.
func (m *chunkMgr) cacheKey(bucket, storageAccessKey string) string {
	return bucket + "/" + storageAccessKey
}

View File

@ -0,0 +1,270 @@
package indexnode
import (
"context"
"fmt"
"math/rand"
"sync"
"time"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/paramtable"
"golang.org/x/exp/mmap"
)
// Seed the global math/rand source so mock data differs across test runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
const (
	// vecFieldID is the schema field ID used for the mock vector field.
	vecFieldID = 101
)
var (
	// notImplErr is returned by every mock method that is deliberately left
	// unimplemented.
	notImplErr = fmt.Errorf("not implemented error")

	// collschema is a minimal two-field collection schema: an int64 primary
	// key (field 0) plus a float-vector field (field vecFieldID).
	collschema = &schemapb.CollectionSchema{
		Name:        "mock_collection",
		Description: "mock",
		AutoID:      false,
		Fields: []*schemapb.FieldSchema{
			{
				FieldID:      0,
				Name:         "int64",
				IsPrimaryKey: true,
				Description:  "",
				DataType:     schemapb.DataType_Int64,
				AutoID:       false,
			},
			{
				FieldID:      vecFieldID,
				Name:         "vector",
				IsPrimaryKey: false,
				Description:  "",
				DataType:     schemapb.DataType_FloatVector,
				AutoID:       false,
			},
		},
	}

	// collMeta is the collection meta handed to the insert codec in
	// mockFieldData; it carries only the vector field.
	collMeta = &etcdpb.CollectionMeta{
		Schema: &schemapb.CollectionSchema{
			Name:        "mock_index",
			Description: "mock",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{
					FieldID:      vecFieldID,
					Name:         "vector",
					IsPrimaryKey: false,
					Description:  "",
					DataType:     schemapb.DataType_FloatVector,
					AutoID:       false,
				},
			},
		},
	}
)
var (
	// mockChunkMgrIns is the shared mock chunk manager handed out by
	// mockStorageFactory.NewChunkManager.
	mockChunkMgrIns = &mockChunkmgr{}
)
// mockStorageFactory is a StorageFactory stub that always hands back the
// shared mockChunkMgrIns, ignoring its arguments.
type mockStorageFactory struct{}

// NewChunkManager returns the package-level mock chunk manager.
func (m *mockStorageFactory) NewChunkManager(context.Context, string, string) (storage.ChunkManager, error) {
	return mockChunkMgrIns, nil
}
// mockChunkmgr is an in-memory storage.ChunkManager used in tests: segment
// (input) blobs live in segmentData, produced index content in indexedData.
type mockChunkmgr struct {
	segmentData sync.Map
	indexedData sync.Map
}

// Compile-time interface conformance check.
var _ storage.ChunkManager = &mockChunkmgr{}

// var _ dependency.Factory = &mockFactory{}
// Path is unimplemented in the mock; it echoes the path with notImplErr.
func (c *mockChunkmgr) Path(filePath string) (string, error) {
	// TODO
	return filePath, notImplErr
}

// Size is unimplemented in the mock.
func (c *mockChunkmgr) Size(filePath string) (int64, error) {
	// TODO
	return 0, notImplErr
}

// Write records produced index content in indexedData, keyed by path.
func (c *mockChunkmgr) Write(filePath string, content []byte) error {
	c.indexedData.Store(filePath, content)
	return nil
}

// MultiWrite is unimplemented in the mock.
func (c *mockChunkmgr) MultiWrite(contents map[string][]byte) error {
	// TODO
	return notImplErr
}

// Exist is unimplemented in the mock.
func (c *mockChunkmgr) Exist(filePath string) (bool, error) {
	// TODO
	return false, notImplErr
}

// Read returns the raw bytes of a previously stored segment blob; the stored
// value is expected to be a *storage.Blob (see mockFieldData).
func (c *mockChunkmgr) Read(filePath string) ([]byte, error) {
	value, ok := c.segmentData.Load(filePath)
	if !ok {
		return nil, fmt.Errorf("data not exists")
	}
	return value.(*storage.Blob).Value, nil
}

// Reader is unimplemented in the mock.
func (c *mockChunkmgr) Reader(filePath string) (storage.FileReader, error) {
	// TODO
	return nil, notImplErr
}

// MultiRead is unimplemented in the mock.
func (c *mockChunkmgr) MultiRead(filePaths []string) ([][]byte, error) {
	// TODO
	return nil, notImplErr
}

// ReadWithPrefix is unimplemented in the mock.
func (c *mockChunkmgr) ReadWithPrefix(prefix string) ([]string, [][]byte, error) {
	// TODO
	return nil, nil, notImplErr
}

// ListWithPrefix is unimplemented in the mock.
func (c *mockChunkmgr) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) {
	// TODO
	return nil, nil, notImplErr
}

// Mmap is unimplemented in the mock.
func (c *mockChunkmgr) Mmap(filePath string) (*mmap.ReaderAt, error) {
	// TODO
	return nil, notImplErr
}

// ReadAt is unimplemented in the mock.
func (c *mockChunkmgr) ReadAt(filePath string, off int64, length int64) ([]byte, error) {
	// TODO
	return nil, notImplErr
}

// Remove is unimplemented in the mock.
func (c *mockChunkmgr) Remove(filePath string) error {
	// TODO
	return notImplErr
}

// MultiRemove is unimplemented in the mock.
func (c *mockChunkmgr) MultiRemove(filePaths []string) error {
	// TODO
	return notImplErr
}

// RemoveWithPrefix is unimplemented in the mock.
func (c *mockChunkmgr) RemoveWithPrefix(prefix string) error {
	// TODO
	return notImplErr
}
// mockFieldData synthesizes one serialized insert binlog for a segment —
// sequential row IDs (1..numrows), monotonically increasing timestamps, and
// numrows random vectors of the given dim — and stores the resulting blob in
// segmentData under dataPath(collectionID, partitionID, segmentID), where
// mockChunkmgr.Read will later find it. Panics on serialization problems,
// which is acceptable in test setup.
func (c *mockChunkmgr) mockFieldData(numrows, dim int, collectionID, partitionID, segmentID int64) {
	idList := make([]int64, 0, numrows)
	tsList := make([]int64, 0, numrows)
	ts0 := time.Now().Unix()
	for i := 0; i < numrows; i++ {
		idList = append(idList, int64(i)+1)
		tsList = append(tsList, ts0+int64(i))
	}
	vecs := randomFloats(numrows, dim)
	// NOTE(review): NumRows is left empty on all field data below — presumably
	// the codec derives row counts from Data; confirm against storage.InsertCodec.
	idField := storage.Int64FieldData{
		NumRows: []int64{},
		Data:    idList,
	}
	tsField := storage.Int64FieldData{
		NumRows: []int64{},
		Data:    tsList,
	}
	vecField := storage.FloatVectorFieldData{
		NumRows: []int64{},
		Data:    vecs,
		Dim:     dim,
	}
	insertData := &storage.InsertData{
		Data: map[int64]storage.FieldData{
			common.TimeStampField: &tsField,
			common.RowIDField:     &idField,
			vecFieldID:            &vecField,
		},
	}
	insertCodec := &storage.InsertCodec{
		Schema: collMeta,
	}
	blobs, _, err := insertCodec.Serialize(partitionID, segmentID, insertData)
	if err != nil {
		panic(err)
	}
	// Exactly one blob is expected for a single serialized batch.
	if len(blobs) != 1 {
		panic("invalid blobs")
	}
	c.segmentData.Store(dataPath(collectionID, partitionID, segmentID), blobs[0])
}
// NewMockChunkManager returns a fresh, empty in-memory chunk manager.
func NewMockChunkManager() *mockChunkmgr {
	var mgr mockChunkmgr
	return &mgr
}
// mockFactory is a dependency factory stub that serves a pre-injected mock
// chunk manager and leaves all message-stream constructors unimplemented.
type mockFactory struct {
	chunkMgr *mockChunkmgr
}
// NewCacheStorageChunkManager is not supported by the mock factory.
func (f *mockFactory) NewCacheStorageChunkManager(context.Context) (storage.ChunkManager, error) {
	return nil, notImplErr
}

// NewVectorStorageChunkManager returns the injected mock chunk manager, or
// errors if the factory was constructed without one.
func (f *mockFactory) NewVectorStorageChunkManager(context.Context) (storage.ChunkManager, error) {
	if f.chunkMgr != nil {
		return f.chunkMgr, nil
	}
	return nil, fmt.Errorf("factory not inited")
}

// Init is a no-op for the mock factory.
func (f *mockFactory) Init(*paramtable.ComponentParam) {
	// do nothing
}

// NewMsgStream is unimplemented in the mock.
func (f *mockFactory) NewMsgStream(context.Context) (msgstream.MsgStream, error) {
	// TODO
	return nil, notImplErr
}

// NewTtMsgStream is unimplemented in the mock.
func (f *mockFactory) NewTtMsgStream(context.Context) (msgstream.MsgStream, error) {
	// TODO
	return nil, notImplErr
}

// NewQueryMsgStream is unimplemented in the mock.
func (f *mockFactory) NewQueryMsgStream(context.Context) (msgstream.MsgStream, error) {
	// TODO
	return nil, notImplErr
}

// NewMsgStreamDisposer is unimplemented in the mock; it returns a nil disposer.
func (f *mockFactory) NewMsgStreamDisposer(ctx context.Context) func([]string, string) error {
	// TODO
	return nil
}
// randomFloats returns a flat slice of rows*dim pseudo-random float32 values
// in [0, 1), laid out row-major (row i occupies indices [i*dim, (i+1)*dim)).
//
// The original version pre-sized the result with capacity `rows` (instead of
// rows*dim, forcing repeated reallocation) and built a throwaway per-row
// slice; this writes each value directly into a correctly sized slice while
// drawing the same sequence from math/rand.
func randomFloats(rows, dim int) []float32 {
	vecs := make([]float32, 0, rows*dim)
	for i := 0; i < rows*dim; i++ {
		vecs = append(vecs, rand.Float32())
	}
	return vecs
}
// dataPath builds the "<collection>-<partition>-<segment>" key under which a
// segment's mock binlog blob is stored.
func dataPath(collectionID, partitionID, segmentID int64) string {
	const layout = "%d-%d-%d"
	return fmt.Sprintf(layout, collectionID, partitionID, segmentID)
}

View File

@ -0,0 +1,65 @@
package indexnode
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"sync"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
)
const (
	// Client and peer ports for the embedded etcd used in unit tests.
	etcdListenPort = 2389
	etcdPeerPort   = 2390
)

var (
	// startSvr/stopSvr make embedded-etcd startup and shutdown idempotent.
	startSvr sync.Once
	stopSvr  sync.Once
	// etcdSvr is the process-wide embedded etcd instance, set by
	// startEmbedEtcd.
	etcdSvr *embed.Etcd
)
// startEmbedEtcd boots a process-wide embedded etcd server exactly once,
// listening on localhost:etcdListenPort (client) and localhost:etcdPeerPort
// (peer), with its data directory in a fresh temp dir. Any setup failure
// panics, since tests cannot proceed without etcd.
func startEmbedEtcd() {
	startSvr.Do(func() {
		mustURL := func(raw string) url.URL {
			parsed, err := url.Parse(raw)
			if err != nil {
				panic(err)
			}
			return *parsed
		}
		dataDir, err := ioutil.TempDir(os.TempDir(), "milvus_ut_etcd")
		if err != nil {
			panic(err)
		}
		cfg := embed.NewConfig()
		cfg.Dir = dataDir
		cfg.LogLevel = "warn"
		cfg.LogOutputs = []string{"default"}
		cfg.LCUrls = []url.URL{mustURL(fmt.Sprintf("http://localhost:%d", etcdListenPort))}
		cfg.LPUrls = []url.URL{mustURL(fmt.Sprintf("http://localhost:%d", etcdPeerPort))}
		server, err := embed.StartEtcd(cfg)
		if err != nil {
			panic(err)
		}
		etcdSvr = server
	})
}
// stopEmbedEtcd shuts the embedded etcd server down (at most once) and
// removes its data directory.
func stopEmbedEtcd() {
	stopSvr.Do(func() {
		etcdSvr.Close()
		dir := etcdSvr.Config().Dir
		os.RemoveAll(dir)
	})
}
// getEtcdClient returns an in-process v3 client wired directly to the
// embedded etcd server; startEmbedEtcd must have been called first.
func getEtcdClient() *clientv3.Client {
	return v3client.New(etcdSvr.Server)
}

View File

@ -38,28 +38,24 @@ import (
"time"
"unsafe"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/dependency"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/common"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
// TODO add comments
// UniqueID is an alias of int64, is used as a unique identifier for the request.
type UniqueID = typeutil.UniqueID
@ -72,6 +68,11 @@ var _ types.IndexNodeComponent = (*IndexNode)(nil)
// Params is a GlobalParamTable singleton of indexnode
var Params paramtable.ComponentParam
type taskKey struct {
ClusterID UniqueID
BuildID UniqueID
}
// IndexNode is a component that executes the task of building indexes.
type IndexNode struct {
stateCode atomic.Value
@ -79,25 +80,21 @@ type IndexNode struct {
loopCtx context.Context
loopCancel func()
sched *TaskScheduler
sched *taskScheduler
once sync.Once
factory dependency.Factory
chunkManager storage.ChunkManager
session *sessionutil.Session
factory dependency.Factory
storageFactory StorageFactory
session *sessionutil.Session
// Add callback functions at different stages
startCallbacks []func()
closeCallbacks []func()
etcdCli *clientv3.Client
etcdKV *etcdkv.EtcdKV
finishedTasks map[UniqueID]commonpb.IndexState
etcdCli *clientv3.Client
closer io.Closer
initOnce sync.Once
initOnce sync.Once
stateLock sync.Mutex
tasks map[taskKey]*taskInfo
}
// NewIndexNode creates a new IndexNode component.
@ -106,15 +103,14 @@ func NewIndexNode(ctx context.Context, factory dependency.Factory) (*IndexNode,
rand.Seed(time.Now().UnixNano())
ctx1, cancel := context.WithCancel(ctx)
b := &IndexNode{
loopCtx: ctx1,
loopCancel: cancel,
factory: factory,
loopCtx: ctx1,
loopCancel: cancel,
factory: factory,
storageFactory: &chunkMgr{},
tasks: map[taskKey]*taskInfo{},
}
b.UpdateStateCode(internalpb.StateCode_Abnormal)
sc, err := NewTaskScheduler(b.loopCtx, b.chunkManager)
if err != nil {
return nil, err
}
sc := NewTaskScheduler(b.loopCtx, 1024)
b.sched = sc
return b, nil
@ -174,8 +170,6 @@ func (i *IndexNode) Init() error {
i.initOnce.Do(func() {
Params.Init()
i.factory.Init(&Params)
i.UpdateStateCode(internalpb.StateCode_Initializing)
log.Debug("IndexNode init", zap.Any("State", i.stateCode.Load().(internalpb.StateCode)))
err := i.initSession()
@ -186,19 +180,12 @@ func (i *IndexNode) Init() error {
}
log.Debug("IndexNode init session successful", zap.Int64("serverID", i.session.ServerID))
etcdKV := etcdkv.NewEtcdKV(i.etcdCli, Params.EtcdCfg.MetaRootPath)
i.etcdKV = etcdKV
chunkManager, err := i.factory.NewVectorStorageChunkManager(i.loopCtx)
if err != nil {
log.Error("IndexNode NewMinIOKV failed", zap.Error(err))
initErr = err
return
}
i.chunkManager = chunkManager
log.Debug("IndexNode NewMinIOKV succeeded")
i.closer = trace.InitTracing("index_node")
@ -214,7 +201,7 @@ func (i *IndexNode) Init() error {
func (i *IndexNode) Start() error {
var startErr error = nil
i.once.Do(func() {
startErr = i.sched.Start()
i.sched.Start()
Params.IndexNodeCfg.CreatedTime = time.Now()
Params.IndexNodeCfg.UpdatedTime = time.Now()
@ -222,10 +209,6 @@ func (i *IndexNode) Start() error {
i.UpdateStateCode(internalpb.StateCode_Healthy)
log.Debug("IndexNode", zap.Any("State", i.stateCode.Load()))
})
// Start callbacks
for _, cb := range i.startCallbacks {
cb()
}
log.Debug("IndexNode start finished", zap.Error(startErr))
return startErr
@ -233,16 +216,20 @@ func (i *IndexNode) Start() error {
// Stop closes the server.
func (i *IndexNode) Stop() error {
// TODO clear cached chunkmgr, close clients
// https://github.com/milvus-io/milvus/issues/12282
i.UpdateStateCode(internalpb.StateCode_Abnormal)
// cleanup all running tasks
deletedTasks := i.deleteAllTasks()
for _, task := range deletedTasks {
if task.cancel != nil {
task.cancel()
}
}
i.loopCancel()
if i.sched != nil {
i.sched.Close()
}
for _, cb := range i.closeCallbacks {
cb()
}
i.session.Revoke(time.Second)
log.Debug("Index node stopped.")
@ -264,83 +251,6 @@ func (i *IndexNode) isHealthy() bool {
return code == internalpb.StateCode_Healthy
}
// CreateIndex receives request from IndexCoordinator to build an index.
// Index building is asynchronous, so when an index building request comes, IndexNode records the task and returns.
func (i *IndexNode) CreateIndex(ctx context.Context, request *indexpb.CreateIndexRequest) (*commonpb.Status, error) {
if i.stateCode.Load().(internalpb.StateCode) != internalpb.StateCode_Healthy {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "state code is not healthy",
}, nil
}
log.Info("IndexNode building index ...",
zap.Int64("IndexBuildID", request.IndexBuildID),
zap.String("IndexName", request.IndexName),
zap.Int64("IndexID", request.IndexID),
zap.Int64("Version", request.Version),
zap.String("MetaPath", request.MetaPath),
zap.Int("binlog paths num", len(request.DataPaths)),
zap.Any("TypeParams", request.TypeParams),
zap.Any("IndexParams", request.IndexParams))
sp, ctx2 := trace.StartSpanFromContextWithOperationName(i.loopCtx, "IndexNode-CreateIndex")
defer sp.Finish()
sp.SetTag("IndexBuildID", strconv.FormatInt(request.IndexBuildID, 10))
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.TotalLabel).Inc()
t := &IndexBuildTask{
BaseTask: BaseTask{
ctx: ctx2,
done: make(chan error),
},
req: request,
cm: i.chunkManager,
etcdKV: i.etcdKV,
nodeID: Params.IndexNodeCfg.GetNodeID(),
serializedSize: 0,
}
ret := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
err := i.sched.IndexBuildQueue.Enqueue(t)
if err != nil {
log.Warn("IndexNode failed to schedule", zap.Int64("indexBuildID", request.IndexBuildID), zap.Error(err))
ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
ret.Reason = err.Error()
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.FailLabel).Inc()
return ret, nil
}
log.Info("IndexNode successfully scheduled", zap.Int64("indexBuildID", request.IndexBuildID))
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.SuccessLabel).Inc()
return ret, nil
}
// GetTaskSlots gets how many task the IndexNode can still perform.
func (i *IndexNode) GetTaskSlots(ctx context.Context, req *indexpb.GetTaskSlotsRequest) (*indexpb.GetTaskSlotsResponse, error) {
if i.stateCode.Load().(internalpb.StateCode) != internalpb.StateCode_Healthy {
return &indexpb.GetTaskSlotsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "state code is not healthy",
},
}, nil
}
log.Info("IndexNode GetTaskSlots received")
ret := &indexpb.GetTaskSlotsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
}
ret.Slots = int64(i.sched.GetTaskSlots())
log.Info("IndexNode GetTaskSlots success", zap.Int64("slots", ret.Slots))
return ret, nil
}
// GetComponentStates gets the component states of IndexNode.
func (i *IndexNode) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
log.Debug("get IndexNode components states ...")
@ -351,7 +261,7 @@ func (i *IndexNode) GetComponentStates(ctx context.Context) (*internalpb.Compone
stateInfo := &internalpb.ComponentInfo{
// NodeID: Params.NodeID, // will race with i.Register()
NodeID: nodeID,
Role: "NodeImpl",
Role: typeutil.IndexNodeRole,
StateCode: i.stateCode.Load().(internalpb.StateCode),
}
@ -391,62 +301,6 @@ func (i *IndexNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringR
}, nil
}
// GetMetrics gets the metrics info of IndexNode.
// TODO(dragondriver): cache the Metrics and set a retention to the cache
func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
if !i.isHealthy() {
log.Warn("IndexNode.GetMetrics failed",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.String("req", req.Request),
zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID())))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID()),
},
Response: "",
}, nil
}
metricType, err := metricsinfo.ParseMetricType(req.Request)
if err != nil {
log.Warn("IndexNode.GetMetrics failed to parse metric type",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.String("req", req.Request),
zap.Error(err))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
Response: "",
}, nil
}
if metricType == metricsinfo.SystemInfoMetrics {
metrics, err := getSystemInfoMetrics(ctx, req, i)
log.Debug("IndexNode.GetMetrics",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType),
zap.Error(err))
return metrics, nil
}
log.Warn("IndexNode.GetMetrics failed, request metric type is not implemented yet",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: metricsinfo.MsgUnimplementedMetric,
},
Response: "",
}, nil
// GetNodeID returns this IndexNode's server ID as recorded in the global
// IndexNode configuration.
func (i *IndexNode) GetNodeID() int64 {
	return Params.IndexNodeCfg.GetNodeID()
}

View File

@ -0,0 +1,44 @@
package indexnode
import (
"context"
"github.com/milvus-io/milvus/internal/types"
)
// mockIndexNodeComponent wraps a real IndexNode so tests can hand it out as
// a types.IndexNodeComponent.
type mockIndexNodeComponent struct {
	IndexNode
}

// Compile-time assertion that the mock satisfies the component interface.
var _ types.IndexNodeComponent = &mockIndexNodeComponent{}
// NewMockIndexNodeComponent builds a fully started IndexNode for tests,
// backed by a mock chunk-manager factory, a mock storage factory, and an
// embedded etcd instance. The node is returned after Init, Start, and
// Register have all succeeded; the first failing step aborts construction.
func NewMockIndexNodeComponent(ctx context.Context) (types.IndexNodeComponent, error) {
	Params.Init()

	node, err := NewIndexNode(ctx, &mockFactory{chunkMgr: &mockChunkmgr{}})
	if err != nil {
		return nil, err
	}

	// Wire the node to the embedded etcd and the mock storage backend.
	startEmbedEtcd()
	node.SetEtcdClient(getEtcdClient())
	node.storageFactory = &mockStorageFactory{}

	// Drive the node through its full lifecycle in order.
	for _, step := range []func() error{node.Init, node.Start, node.Register} {
		if err := step(); err != nil {
			return nil, err
		}
	}

	return &mockIndexNodeComponent{IndexNode: *node}, nil
}

View File

@ -16,187 +16,187 @@
package indexnode
import (
"context"
"strconv"
"testing"
// import (
// "context"
// "strconv"
// "testing"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/stretchr/testify/assert"
)
// "github.com/golang/protobuf/proto"
// "github.com/milvus-io/milvus/internal/proto/commonpb"
// "github.com/milvus-io/milvus/internal/proto/indexpb"
// "github.com/milvus-io/milvus/internal/proto/internalpb"
// "github.com/milvus-io/milvus/internal/proto/milvuspb"
// "github.com/milvus-io/milvus/internal/util/etcd"
// "github.com/milvus-io/milvus/internal/util/metricsinfo"
// "github.com/stretchr/testify/assert"
// )
func TestIndexNodeMock(t *testing.T) {
Params.Init()
inm := Mock{
Build: true,
}
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.NoError(t, err)
inm.SetEtcdClient(etcdCli)
defer etcdCli.Close()
err = inm.Register()
assert.Nil(t, err)
err = inm.Init()
assert.Nil(t, err)
err = inm.Start()
assert.Nil(t, err)
ctx := context.Background()
// func TestIndexNodeMock(t *testing.T) {
// Params.Init()
// inm := Mock{
// Build: true,
// }
// etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
// assert.NoError(t, err)
// inm.SetEtcdClient(etcdCli)
// defer etcdCli.Close()
// err = inm.Register()
// assert.Nil(t, err)
// err = inm.Init()
// assert.Nil(t, err)
// err = inm.Start()
// assert.Nil(t, err)
// ctx := context.Background()
t.Run("GetComponentStates", func(t *testing.T) {
states, err := inm.GetComponentStates(ctx)
assert.Nil(t, err)
assert.Equal(t, internalpb.StateCode_Healthy, states.State.StateCode)
})
// t.Run("GetComponentStates", func(t *testing.T) {
// states, err := inm.GetComponentStates(ctx)
// assert.Nil(t, err)
// assert.Equal(t, internalpb.StateCode_Healthy, states.State.StateCode)
// })
t.Run("GetTimeTickChannel", func(t *testing.T) {
resp, err := inm.GetTimeTickChannel(ctx)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
// t.Run("GetTimeTickChannel", func(t *testing.T) {
// resp, err := inm.GetTimeTickChannel(ctx)
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// })
t.Run("GetStatisticsChannel", func(t *testing.T) {
resp, err := inm.GetStatisticsChannel(ctx)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
// t.Run("GetStatisticsChannel", func(t *testing.T) {
// resp, err := inm.GetStatisticsChannel(ctx)
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// })
t.Run("CreateIndex", func(t *testing.T) {
req := &indexpb.CreateIndexRequest{
IndexBuildID: 0,
IndexID: 0,
DataPaths: []string{},
}
resp, err := inm.CreateIndex(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
// t.Run("CreateIndex", func(t *testing.T) {
// req := &indexpb.CreateIndexRequest{
// IndexBuildID: 0,
// IndexID: 0,
// DataPaths: []string{},
// }
// resp, err := inm.CreateIndex(ctx, req)
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// })
t.Run("GetMetrics", func(t *testing.T) {
req, err := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
assert.Nil(t, err)
resp, err := inm.GetMetrics(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// t.Run("GetMetrics", func(t *testing.T) {
// req, err := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
// assert.Nil(t, err)
// resp, err := inm.GetMetrics(ctx, req)
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
req2, err := metricsinfo.ConstructRequestByMetricType("IndexNode")
assert.Nil(t, err)
resp2, err := inm.GetMetrics(ctx, req2)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp2.Status.ErrorCode)
})
// req2, err := metricsinfo.ConstructRequestByMetricType("IndexNode")
// assert.Nil(t, err)
// resp2, err := inm.GetMetrics(ctx, req2)
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp2.Status.ErrorCode)
// })
err = inm.Stop()
assert.Nil(t, err)
}
// err = inm.Stop()
// assert.Nil(t, err)
// }
func TestIndexNodeMockError(t *testing.T) {
inm := Mock{
Failure: false,
Build: false,
Err: true,
}
// func TestIndexNodeMockError(t *testing.T) {
// inm := Mock{
// Failure: false,
// Build: false,
// Err: true,
// }
ctx := context.Background()
err := inm.Register()
assert.NotNil(t, err)
// ctx := context.Background()
// err := inm.Register()
// assert.NotNil(t, err)
err = inm.Init()
assert.NotNil(t, err)
// err = inm.Init()
// assert.NotNil(t, err)
err = inm.Start()
assert.NotNil(t, err)
// err = inm.Start()
// assert.NotNil(t, err)
t.Run("GetComponentStates error", func(t *testing.T) {
resp, err := inm.GetComponentStates(ctx)
assert.NotNil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
// t.Run("GetComponentStates error", func(t *testing.T) {
// resp, err := inm.GetComponentStates(ctx)
// assert.NotNil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
// })
t.Run("GetStatisticsChannel error", func(t *testing.T) {
resp, err := inm.GetStatisticsChannel(ctx)
assert.NotNil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
// t.Run("GetStatisticsChannel error", func(t *testing.T) {
// resp, err := inm.GetStatisticsChannel(ctx)
// assert.NotNil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
// })
t.Run("GetTimeTickChannel error", func(t *testing.T) {
resp, err := inm.GetTimeTickChannel(ctx)
assert.NotNil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
// t.Run("GetTimeTickChannel error", func(t *testing.T) {
// resp, err := inm.GetTimeTickChannel(ctx)
// assert.NotNil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
// })
t.Run("CreateIndex error", func(t *testing.T) {
resp, err := inm.CreateIndex(ctx, &indexpb.CreateIndexRequest{})
assert.NotNil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.ErrorCode)
})
// t.Run("CreateIndex error", func(t *testing.T) {
// resp, err := inm.CreateIndex(ctx, &indexpb.CreateIndexRequest{})
// assert.NotNil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.ErrorCode)
// })
t.Run("GetMetrics error", func(t *testing.T) {
req := &milvuspb.GetMetricsRequest{}
resp, err := inm.GetMetrics(ctx, req)
// t.Run("GetMetrics error", func(t *testing.T) {
// req := &milvuspb.GetMetricsRequest{}
// resp, err := inm.GetMetrics(ctx, req)
assert.NotNil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
// assert.NotNil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
// })
err = inm.Stop()
assert.NotNil(t, err)
}
// err = inm.Stop()
// assert.NotNil(t, err)
// }
func TestIndexNodeMockFiled(t *testing.T) {
inm := Mock{
Failure: true,
Build: true,
Err: false,
}
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.NoError(t, err)
inm.SetEtcdClient(etcdCli)
defer etcdCli.Close()
err = inm.Register()
assert.Nil(t, err)
err = inm.Init()
assert.Nil(t, err)
err = inm.Start()
assert.Nil(t, err)
ctx := context.Background()
// func TestIndexNodeMockFiled(t *testing.T) {
// inm := Mock{
// Failure: true,
// Build: true,
// Err: false,
// }
// etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
// assert.NoError(t, err)
// inm.SetEtcdClient(etcdCli)
// defer etcdCli.Close()
// err = inm.Register()
// assert.Nil(t, err)
// err = inm.Init()
// assert.Nil(t, err)
// err = inm.Start()
// assert.Nil(t, err)
// ctx := context.Background()
t.Run("CreateIndex failed", func(t *testing.T) {
req := &indexpb.CreateIndexRequest{
IndexBuildID: 0,
IndexID: 0,
DataPaths: []string{},
}
key := "/indexes/" + strconv.FormatInt(10, 10)
indexMeta := &indexpb.IndexMeta{
IndexBuildID: 10,
State: commonpb.IndexState_InProgress,
IndexVersion: 0,
}
// t.Run("CreateIndex failed", func(t *testing.T) {
// req := &indexpb.CreateIndexRequest{
// IndexBuildID: 0,
// IndexID: 0,
// DataPaths: []string{},
// }
// key := "/indexes/" + strconv.FormatInt(10, 10)
// indexMeta := &indexpb.IndexMeta{
// IndexBuildID: 10,
// State: commonpb.IndexState_InProgress,
// IndexVersion: 0,
// }
value, err := proto.Marshal(indexMeta)
assert.Nil(t, err)
err = inm.etcdKV.Save(key, string(value))
assert.Nil(t, err)
resp, err := inm.CreateIndex(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
err = inm.etcdKV.RemoveWithPrefix(key)
assert.Nil(t, err)
})
t.Run("GetMetrics failed", func(t *testing.T) {
req := &milvuspb.GetMetricsRequest{}
resp, err := inm.GetMetrics(ctx, req)
// value, err := proto.Marshal(indexMeta)
// assert.Nil(t, err)
// err = inm.etcdKV.Save(key, string(value))
// assert.Nil(t, err)
// resp, err := inm.CreateIndex(ctx, req)
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// err = inm.etcdKV.RemoveWithPrefix(key)
// assert.Nil(t, err)
// })
// t.Run("GetMetrics failed", func(t *testing.T) {
// req := &milvuspb.GetMetricsRequest{}
// resp, err := inm.GetMetrics(ctx, req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
})
// assert.Nil(t, err)
// assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
// })
err = inm.Stop()
assert.Nil(t, err)
}
// err = inm.Stop()
// assert.Nil(t, err)
// }

View File

@ -0,0 +1,250 @@
package indexnode
import (
"context"
"fmt"
"strconv"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexnodepb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
"go.uber.org/zap"
)
// CreateJob registers and schedules a new index build task on this IndexNode.
// The request is rejected when the node is not healthy or when a task with the
// same (ClusterID, BuildID) pair is already registered. Failures are reported
// through the returned Status; the error return is reserved for transport
// problems and is always nil here.
func (i *IndexNode) CreateJob(ctx context.Context, req *indexnodepb.CreateJobRequest) (*commonpb.Status, error) {
	stateCode := i.stateCode.Load().(internalpb.StateCode)
	if stateCode != internalpb.StateCode_Healthy {
		log.Warn("index node not ready", zap.Int32("state", int32(stateCode)), zap.Int64("ClusterID", req.ClusterID), zap.Int64("IndexBuildID", req.BuildID))
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    "state code is not healthy",
		}, nil
	}
	log.Info("IndexNode building index ...",
		zap.Int64("ClusterID", req.ClusterID),
		zap.Int64("IndexBuildID", req.BuildID),
		zap.Int64("IndexID", req.IndexID),
		zap.String("IndexName", req.IndexName),
		zap.String("IndexFilePrefix", req.IndexFilePrefix),
		zap.Int64("IndexVersion", req.IndexVersion),
		zap.Strings("DataPaths", req.DataPaths),
		zap.Any("TypeParams", req.TypeParams),
		zap.Any("IndexParams", req.IndexParams))
	sp, _ := trace.StartSpanFromContextWithOperationName(ctx, "IndexNode-CreateIndex")
	defer sp.Finish()
	sp.SetTag("IndexBuildID", strconv.FormatInt(req.BuildID, 10))
	sp.SetTag("ClusterID", strconv.FormatInt(req.ClusterID, 10))
	metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.TotalLabel).Inc()

	// The task runs on the node's loop context so it outlives this RPC, but it
	// carries its own cancel so DropJobs can abort it individually.
	taskCtx := logutil.WithFields(i.loopCtx, zap.Int64("ClusterID", req.ClusterID), zap.Int64("IndexBuildID", req.BuildID))
	taskCtx, taskCancel := context.WithCancel(taskCtx)
	if oldInfo := i.loadOrStoreTask(req.ClusterID, req.BuildID, &taskInfo{
		cancel: taskCancel,
		state:  commonpb.IndexState_InProgress}); oldInfo != nil {
		log.Warn("duplicated index build task", zap.Int64("ClusterID", req.ClusterID), zap.Int64("BuildID", req.BuildID))
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_BuildIndexError,
			Reason:    "duplicated index build task",
		}, nil
	}
	cm, err := i.storageFactory.NewChunkManager(ctx, req.BucketName, req.StorageAccessKey)
	if err != nil {
		log.Error("create chunk manager failed", zap.String("Bucket", req.BucketName), zap.String("AccessKey", req.StorageAccessKey),
			zap.Int64("ClusterID", req.ClusterID), zap.Int64("IndexBuildID", req.BuildID), zap.Error(err))
		// Roll back the registration made above; otherwise the task would stay
		// InProgress forever and block a retry with the same BuildID.
		i.deleteTaskInfos([]taskKey{{ClusterID: req.ClusterID, BuildID: req.BuildID}})
		taskCancel()
		metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.FailLabel).Inc()
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_BuildIndexError,
			Reason:    "create chunk manager failed",
		}, nil
	}
	task := &indexBuildTask{
		ident:          fmt.Sprintf("%d/%d", req.ClusterID, req.BuildID),
		ctx:            taskCtx,
		cancel:         taskCancel,
		BuildID:        req.BuildID,
		ClusterID:      req.ClusterID,
		node:           i,
		req:            req,
		cm:             cm,
		nodeID:         i.GetNodeID(),
		tr:             timerecord.NewTimeRecorder(fmt.Sprintf("IndexBuildID: %d, ClusterID: %d", req.BuildID, req.ClusterID)),
		serializedSize: 0,
	}
	ret := &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
		Reason:    "",
	}
	if err := i.sched.Enqueue(task); err != nil {
		log.Warn("IndexNode failed to schedule", zap.Int64("IndexBuildID", req.BuildID), zap.Int64("ClusterID", req.ClusterID), zap.Error(err))
		// The task never entered the scheduler, so unregister it as well.
		i.deleteTaskInfos([]taskKey{{ClusterID: req.ClusterID, BuildID: req.BuildID}})
		taskCancel()
		ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
		ret.Reason = err.Error()
		metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.FailLabel).Inc()
		return ret, nil
	}
	log.Info("IndexNode successfully scheduled", zap.Int64("IndexBuildID", req.BuildID), zap.Int64("ClusterID", req.ClusterID), zap.String("indexName", req.IndexName))
	return ret, nil
}
// QueryJobs reports the build state and produced index files for the
// requested BuildIDs of one cluster. BuildIDs with no registered task are
// returned with state IndexStateNone so the response always has one entry
// per requested ID, in request order.
func (i *IndexNode) QueryJobs(ctx context.Context, req *indexnodepb.QueryJobsRequest) (*indexnodepb.QueryJobsRespond, error) {
	stateCode := i.stateCode.Load().(internalpb.StateCode)
	if stateCode != internalpb.StateCode_Healthy {
		log.Warn("index node not ready", zap.Int32("state", int32(stateCode)), zap.Int64("ClusterID", req.ClusterID))
		return &indexnodepb.QueryJobsRespond{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
				Reason:    "state code is not healthy",
			},
		}, nil
	}
	log.Debug("querying index build task", zap.Int64("ClusterID", req.ClusterID), zap.Int64s("IndexBuildIDs", req.BuildIDs))
	// Snapshot the per-build state of this cluster's tasks.
	// NOTE(review): indexfiles[:] aliases the live slice rather than copying
	// it — confirm foreachTaskInfo's locking makes sharing it safe.
	infos := make(map[UniqueID]*taskInfo)
	i.foreachTaskInfo(func(clusterID, buildID UniqueID, info *taskInfo) {
		if clusterID == req.ClusterID {
			infos[buildID] = &taskInfo{
				state:      info.state,
				indexfiles: info.indexfiles[:],
			}
		}
	})
	ret := &indexnodepb.QueryJobsRespond{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
			Reason:    "",
		},
		ClusterID:  req.ClusterID,
		IndexInfos: make([]*indexnodepb.IndexInfo, 0, len(req.BuildIDs)),
	}
	// Use idx, not i, so the loop variable does not shadow the receiver.
	for idx, buildID := range req.BuildIDs {
		ret.IndexInfos = append(ret.IndexInfos, &indexnodepb.IndexInfo{
			BuildID:    buildID,
			State:      commonpb.IndexState_IndexStateNone,
			IndexFiles: nil,
		})
		if info, ok := infos[buildID]; ok {
			ret.IndexInfos[idx].State = info.state
			ret.IndexInfos[idx].IndexFiles = info.indexfiles
		}
	}
	return ret, nil
}
// DropJobs aborts and unregisters the build tasks identified by the given
// BuildIDs for one cluster. Unknown IDs are ignored; the call always returns
// Success when the node is healthy.
func (i *IndexNode) DropJobs(ctx context.Context, req *indexnodepb.DropJobsRequest) (*commonpb.Status, error) {
	log.Debug("drop index build jobs", zap.Int64("ClusterID", req.ClusterID), zap.Int64s("IndexBuildIDs", req.BuildIDs))
	if code := i.stateCode.Load().(internalpb.StateCode); code != internalpb.StateCode_Healthy {
		log.Warn("index node not ready", zap.Int32("state", int32(code)), zap.Int64("ClusterID", req.ClusterID))
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    "state code is not healthy",
		}, nil
	}

	toRemove := make([]taskKey, 0, len(req.BuildIDs))
	for _, buildID := range req.BuildIDs {
		toRemove = append(toRemove, taskKey{ClusterID: req.ClusterID, BuildID: buildID})
	}

	// Cancel whichever of the removed tasks were still running.
	for _, removed := range i.deleteTaskInfos(toRemove) {
		if removed.cancel != nil {
			removed.cancel()
		}
	}

	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
		Reason:    "",
	}, nil
}
// GetJobStats reports the scheduler's pending-job backlog together with the
// per-task statistics of every task currently registered on this node.
func (i *IndexNode) GetJobStats(ctx context.Context, req *indexnodepb.GetJobStatsRequest) (*indexnodepb.GetJobStatsRespond, error) {
	if code := i.stateCode.Load().(internalpb.StateCode); code != internalpb.StateCode_Healthy {
		log.Warn("index node not ready", zap.Int32("state", int32(code)))
		return &indexnodepb.GetJobStatsRespond{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
				Reason:    "state code is not healthy",
			},
		}, nil
	}

	pendingJobs := i.sched.GetPendingJob()
	stats := make([]*indexnodepb.JobInfo, 0)
	i.foreachTaskInfo(func(clusterID, buildID UniqueID, info *taskInfo) {
		if info.statistic != nil {
			// Clone so callers cannot mutate the live task statistics.
			stats = append(stats, proto.Clone(info.statistic).(*indexnodepb.JobInfo))
		}
	})

	return &indexnodepb.GetJobStatsRespond{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
			Reason:    "",
		},
		PendingJobs: int64(pendingJobs),
		JobInfos:    stats,
	}, nil
}
// GetMetrics gets the metrics info of IndexNode. Only the SystemInfoMetrics
// metric type is implemented; any other type yields an UnexpectedError status.
// TODO(dragondriver): cache the Metrics and set a retention to the cache
func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
	if !i.isHealthy() {
		log.Warn("IndexNode.GetMetrics failed",
			zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
			zap.String("req", req.Request),
			zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID())))
		return &milvuspb.GetMetricsResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
				Reason:    msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID()),
			},
			Response: "",
		}, nil
	}
	metricType, err := metricsinfo.ParseMetricType(req.Request)
	if err != nil {
		log.Warn("IndexNode.GetMetrics failed to parse metric type",
			zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
			zap.String("req", req.Request),
			zap.Error(err))
		return &milvuspb.GetMetricsResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
				Reason:    err.Error(),
			},
			Response: "",
		}, nil
	}
	if metricType == metricsinfo.SystemInfoMetrics {
		metrics, err := getSystemInfoMetrics(ctx, req, i)
		log.Debug("IndexNode.GetMetrics",
			zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
			zap.String("req", req.Request),
			zap.String("metric_type", metricType),
			zap.Error(err))
		if err != nil {
			// Previously the error was logged but metrics was returned anyway,
			// which could hand the caller a nil/partial response with a nil
			// error. Surface the failure explicitly instead.
			return &milvuspb.GetMetricsResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_UnexpectedError,
					Reason:    err.Error(),
				},
				Response: "",
			}, nil
		}
		return metrics, nil
	}
	log.Warn("IndexNode.GetMetrics failed, request metric type is not implemented yet",
		zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
		zap.String("req", req.Request),
		zap.String("metric_type", metricType))
	return &milvuspb.GetMetricsResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
			Reason:    metricsinfo.MsgUnimplementedMetric,
		},
		Response: "",
	}, nil
}

View File

@ -0,0 +1,383 @@
package indexnode
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"sync"
"testing"
"time"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexnodepb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/stretchr/testify/assert"
)
// TestIndexNodeSimple drives one index build end-to-end on a mock IndexNode:
// create a job, poll QueryJobs until it finishes, check the produced index
// files and job statistics, then drop a non-existent job.
func TestIndexNodeSimple(t *testing.T) {
	in, err := NewMockIndexNodeComponent(context.TODO())
	assert.Nil(t, err)
	ctx := context.TODO()
	// The freshly built mock node must report itself healthy.
	state, err := in.GetComponentStates(ctx)
	assert.Nil(t, err)
	assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
	assert.Equal(t, state.State.StateCode, internalpb.StateCode_Healthy)
	idxParams := map[string]string{
		"nlist": "128",
	}
	idxParamsPayload, err := json.Marshal(idxParams)
	assert.Nil(t, err, err)
	// Identifiers and parameters for the single build job under test.
	var (
		clusterID     int64 = 0
		idxFilePrefix       = "mock_idx"
		buildID       int64 = 1
		collID        int64 = 101
		partID        int64 = 201
		segID         int64 = 301
		idxID         int64 = 401
		idxName             = "mock_idx"
		vecDim        int64 = 8
		typeParams          = []*commonpb.KeyValuePair{
			{
				Key:   "dim",
				Value: fmt.Sprintf("%d", vecDim),
			},
		}
		indexParams = []*commonpb.KeyValuePair{
			{
				Key:   "params",
				Value: string(idxParamsPayload),
			},
			{
				Key:   "metric_type",
				Value: "L2",
			},
			{
				Key:   "index_type",
				Value: "IVF_FLAT",
			},
		}
		mockChunkMgr = mockChunkMgrIns
	)
	// Seed the mock chunk manager with field data for the segment.
	// NOTE(review): uses package-level `dim` here but `vecDim` above —
	// presumably both are 8; confirm they cannot diverge.
	mockChunkMgr.mockFieldData(1000, dim, collID, partID, segID)
	t.Run("create job", func(t *testing.T) {
		createReq := &indexnodepb.CreateJobRequest{
			ClusterID:       clusterID,
			IndexFilePrefix: idxFilePrefix,
			BuildID:         int64(buildID),
			DataPaths:       []string{dataPath(collID, partID, segID)},
			IndexVersion:    0,
			IndexID:         idxID,
			IndexName:       idxName,
			IndexParams:     indexParams,
			TypeParams:      typeParams,
		}
		status, err := in.CreateJob(ctx, createReq)
		assert.Nil(t, err)
		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
	})
	t.Run(("query job"), func(t *testing.T) {
		queryJob := &indexnodepb.QueryJobsRequest{
			ClusterID: clusterID,
			BuildIDs:  []int64{buildID},
		}
		// Poll until the build reaches Finished, bounded by a 10s deadline.
		timeout := time.After(time.Second * 10)
		var idxInfo *indexnodepb.IndexInfo
	Loop:
		for {
			select {
			case <-timeout:
				t.Fatal("timeout for querying jobs")
			default:
				resp, err := in.QueryJobs(ctx, queryJob)
				assert.Nil(t, err)
				assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
				assert.Equal(t, resp.ClusterID, clusterID)
				for _, indexInfo := range resp.IndexInfos {
					if indexInfo.BuildID == buildID {
						if indexInfo.State == commonpb.IndexState_Finished {
							idxInfo = indexInfo
							break Loop
						}
					}
				}
			}
		}
		assert.NotNil(t, idxInfo)
		// Every reported index file must exist in the mock storage.
		for _, idxFile := range idxInfo.IndexFiles {
			_, ok := mockChunkMgr.indexedData.Load(idxFile)
			assert.True(t, ok)
			t.Logf("indexed file: %s", idxFile)
		}
		// After completion there is no backlog and exactly one job record.
		jobNumRet, err := in.GetJobStats(ctx, &indexnodepb.GetJobStatsRequest{})
		assert.Nil(t, err)
		assert.Equal(t, jobNumRet.Status.GetErrorCode(), commonpb.ErrorCode_Success)
		assert.Equal(t, jobNumRet.PendingJobs, int64(0))
		assert.Equal(t, len(jobNumRet.JobInfos), 1)
		jobInfo := jobNumRet.JobInfos[0]
		assert.True(t, jobInfo.Dim == 8)
		assert.True(t, jobInfo.NumRows == 1000)
		assert.True(t, jobInfo.PodID == 1)
		assert.ElementsMatch(t, jobInfo.IndexParams, indexParams)
	})
	t.Run("drop not exists jobs", func(t *testing.T) {
		// Dropping an unknown BuildID is a no-op and still succeeds.
		status, err := in.DropJobs(ctx, &indexnodepb.DropJobsRequest{
			ClusterID: clusterID,
			BuildIDs:  []int64{100001},
		})
		assert.Nil(t, err)
		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
	})
}
// testTask bundles the identifiers and data-generation parameters for one
// synthetic index build job used by TestIndexNodeComplex.
type testTask struct {
	buildID    int64                    // unique build job ID
	collID     int64                    // collection the mock data belongs to
	partID     int64                    // partition within the collection
	segID      int64                    // segment whose field data is indexed
	idxID      int64                    // index ID for the build request
	dim        int                      // vector dimension of the mock data
	rownum     int                      // number of mock rows to generate
	typeParams []*commonpb.KeyValuePair // schema-level params (e.g. "dim")
	idxParams  []*commonpb.KeyValuePair // index-level params (type, metric, ...)
}
// TestIndexNodeComplex stress-tests the IndexNode with 256 build jobs of
// varying dimension/row count, racing CreateJob against randomly issued
// DropJobs, then waits for the backlog to drain and verifies per-job results
// and that Stop clears every registered task.
func TestIndexNodeComplex(t *testing.T) {
	idxParams := map[string]string{
		"nlist": "128",
	}
	idxParamsPayload, err := json.Marshal(idxParams)
	assert.Nil(t, err)
	var (
		clusterID        int64 = 0
		buildID0         int64 = 0
		collID0          int64 = 10000
		partID0          int64 = 20000
		segID0           int64 = 30000
		idxID0           int64 = 40000
		typesParamsLists       = [][]*commonpb.KeyValuePair{
			{{
				Key:   "dim",
				Value: fmt.Sprintf("%d", 8),
			}},
			{{
				Key:   "dim",
				Value: fmt.Sprintf("%d", 16),
			}},
			{{
				Key:   "dim",
				Value: fmt.Sprintf("%d", 32),
			}},
		}
		rowNums     = []int{100, 1000, 10000}
		dims        = []int{8, 16, 32}
		indexParams = []*commonpb.KeyValuePair{
			{
				Key:   "params",
				Value: string(idxParamsPayload),
			},
			{
				Key:   "metric_type",
				Value: "L2",
			},
			{
				Key:   "index_type",
				Value: "IVF_FLAT",
			},
		}
	)
	in, err := NewMockIndexNodeComponent(context.TODO())
	assert.Nil(t, err)
	ctx := context.TODO()
	state, err := in.GetComponentStates(ctx)
	assert.Nil(t, err)
	assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
	assert.Equal(t, state.State.StateCode, internalpb.StateCode_Healthy)
	mockChunkMgr := mockChunkMgrIns

	// Prepare 256 tasks; roughly half get backing field data, so the other
	// half is expected to fail or be dropped before producing index files.
	tasks := make([]*testTask, 0)
	var i int64
	t.Logf("preparing mock data...")
	wg := sync.WaitGroup{}
	for i = 0; i < 256; i++ {
		task := &testTask{
			buildID:    i + buildID0,
			collID:     i + collID0,
			partID:     i + partID0,
			segID:      i + segID0,
			idxID:      i + idxID0,
			typeParams: typesParamsLists[i%3],
			dim:        dims[i%3],
			rownum:     rowNums[i%3],
			idxParams:  indexParams,
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if rand.Float32() < 0.5 {
				mockChunkMgr.mockFieldData(task.rownum, task.dim, task.collID, task.partID, task.segID)
			}
		}()
		tasks = append(tasks, task)
	}
	wg.Wait()

	// Fire CreateJob for every task while concurrently dropping a random
	// subset, to exercise the create/drop race.
	t.Logf("start concurrent testing")
	testwg := sync.WaitGroup{}
	for i := 0; i < len(tasks); i++ {
		req := &indexnodepb.CreateJobRequest{
			ClusterID:       clusterID,
			IndexFilePrefix: "mock_idx",
			BuildID:         tasks[i].buildID,
			DataPaths:       []string{dataPath(tasks[i].collID, tasks[i].partID, tasks[i].segID)},
			IndexVersion:    0,
			IndexID:         tasks[i].idxID,
			IndexName:       fmt.Sprintf("idx%d", tasks[i].idxID),
			IndexParams:     tasks[i].idxParams,
			TypeParams:      tasks[i].typeParams,
		}
		testwg.Add(1)
		go func() {
			defer testwg.Done()
			status, err := in.CreateJob(ctx, req)
			assert.Nil(t, err)
			// A create racing a drop may legitimately fail with the
			// cancellation error; anything else must succeed.
			if status.ErrorCode != commonpb.ErrorCode_Success {
				assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
				assert.Equal(t, status.Reason, cancelErr.Error())
			}
		}()
		testwg.Add(1)
		go func(idx int) {
			defer testwg.Done()
			if rand.Float32() < 0.5 {
				status, err := in.DropJobs(ctx, &indexnodepb.DropJobsRequest{
					ClusterID: clusterID,
					BuildIDs:  []int64{tasks[idx].buildID},
				})
				assert.Nil(t, err)
				assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
			}
		}(i)
	}
	testwg.Wait()

	// Wait (bounded) for the scheduler backlog to drain.
	timeout := time.After(time.Second * 30)
Loop:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout testing")
		default:
			jobNumRet, err := in.GetJobStats(ctx, &indexnodepb.GetJobStatsRequest{})
			assert.Nil(t, err)
			assert.Equal(t, jobNumRet.Status.ErrorCode, commonpb.ErrorCode_Success)
			if jobNumRet.PendingJobs == 0 {
				break Loop
			}
		}
	}

	buildIDs := make([]int64, 0, len(tasks))
	for _, task := range tasks {
		buildIDs = append(buildIDs, task.buildID)
	}
	jobresp, err := in.QueryJobs(ctx, &indexnodepb.QueryJobsRequest{
		ClusterID: clusterID,
		BuildIDs:  buildIDs,
	})
	assert.Nil(t, err)
	// Fixed: the original compared the error code against itself, which can
	// never fail; assert against the expected Success code instead.
	assert.Equal(t, jobresp.Status.ErrorCode, commonpb.ErrorCode_Success)
	for _, job := range jobresp.IndexInfos {
		task := tasks[job.BuildID-buildID0]
		if job.State == commonpb.IndexState_Finished {
			// Finished jobs must have all their index files in mock storage.
			for _, idxFile := range job.IndexFiles {
				_, ok := mockChunkMgr.indexedData.Load(idxFile)
				assert.True(t, ok)
			}
			t.Logf("buildID: %d, indexFiles: %v", job.BuildID, job.IndexFiles)
		} else {
			_, ok := mockChunkMgr.indexedData.Load(dataPath(task.collID, task.partID, task.segID))
			assert.False(t, ok)
		}
	}

	// stop indexnode: all task records must be gone and the state Abnormal.
	assert.Nil(t, in.Stop())
	node := in.(*mockIndexNodeComponent).IndexNode
	assert.Equal(t, 0, len(node.tasks))
	assert.Equal(t, internalpb.StateCode_Abnormal, node.stateCode.Load().(internalpb.StateCode))
}
// TestAbnormalIndexNode stops a freshly built mock IndexNode and checks that
// every job RPC afterwards is rejected with an UnexpectedError status.
func TestAbnormalIndexNode(t *testing.T) {
	node, err := NewMockIndexNodeComponent(context.TODO())
	assert.Nil(t, err)
	// Stop first: everything below must see an unhealthy node.
	assert.Nil(t, node.Stop())

	ctx := context.TODO()

	createRet, err := node.CreateJob(ctx, &indexnodepb.CreateJobRequest{})
	assert.Nil(t, err)
	assert.Equal(t, createRet.ErrorCode, commonpb.ErrorCode_UnexpectedError)

	queryRet, err := node.QueryJobs(ctx, &indexnodepb.QueryJobsRequest{})
	assert.Nil(t, err)
	assert.Equal(t, queryRet.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)

	dropRet, err := node.DropJobs(ctx, &indexnodepb.DropJobsRequest{})
	assert.Nil(t, err)
	assert.Equal(t, dropRet.ErrorCode, commonpb.ErrorCode_UnexpectedError)

	statsRet, err := node.GetJobStats(ctx, &indexnodepb.GetJobStatsRequest{})
	assert.Nil(t, err)
	assert.Equal(t, statsRet.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)

	metricsRet, err := node.GetMetrics(ctx, &milvuspb.GetMetricsRequest{})
	assert.Nil(t, err)
	assert.Equal(t, metricsRet.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
}
// TestGetMetrics checks that a system-info metrics request against a healthy
// mock IndexNode succeeds and yields a non-trivial response payload.
func TestGetMetrics(t *testing.T) {
	ctx := context.TODO()
	metricReq, _ := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)

	node, err := NewMockIndexNodeComponent(ctx)
	assert.Nil(t, err)

	resp, err := node.GetMetrics(ctx, metricReq)
	assert.Nil(t, err)
	assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
	t.Logf("Component: %s, Metrics: %s", resp.ComponentName, resp.Response)
}
// TestGetMetricsError checks the two GetMetrics failure modes: a request
// whose JSON lacks the metric_type key, and a request for an unimplemented
// metric type.
func TestGetMetricsError(t *testing.T) {
	ctx := context.TODO()
	node, err := NewMockIndexNodeComponent(ctx)
	assert.Nil(t, err)

	// Malformed request: the key is misspelled, so parsing must fail.
	badResp, err := node.GetMetrics(ctx, &milvuspb.GetMetricsRequest{
		Request: `{"metric_typ": "system_info"}`,
	})
	assert.Nil(t, err)
	assert.Equal(t, badResp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)

	// Well-formed request for a metric type the node does not implement.
	unimplResp, err := node.GetMetrics(ctx, &milvuspb.GetMetricsRequest{
		Request: `{"metric_type": "application_info"}`,
	})
	assert.Nil(t, err)
	assert.Equal(t, unimplResp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
	assert.Equal(t, unimplResp.Status.Reason, metricsinfo.MsgUnimplementedMetric)
}

View File

@ -14,836 +14,142 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package indexnode
import (
"container/list"
"context"
"encoding/json"
"fmt"
"os"
"path"
"strconv"
"testing"
"time"
"go.uber.org/zap"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/stretchr/testify/assert"
)
func TestIndexNode(t *testing.T) {
ctx := context.Background()
indexID := UniqueID(999)
indexBuildID1 := UniqueID(54321)
indexBuildID2 := UniqueID(12345)
floatVectorFieldID := UniqueID(101)
binaryVectorFieldID := UniqueID(102)
tsFieldID := UniqueID(1)
collectionID := UniqueID(201)
floatVectorFieldName := "float_vector"
binaryVectorFieldName := "binary_vector"
metaPath1 := "FloatVector"
metaPath2 := "BinaryVector"
metaPath3 := "FloatVectorDeleted"
floatVectorBinlogPath := "float_vector_binlog"
binaryVectorBinlogPath := "binary_vector_binlog"
factory := dependency.NewDefaultFactory(true)
func TestRegister(t *testing.T) {
var (
factory = &mockFactory{}
ctx = context.TODO()
)
Params.Init()
in, err := NewIndexNode(ctx, factory)
assert.Nil(t, err)
Params.Init()
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.NoError(t, err)
in.SetEtcdClient(etcdCli)
defer etcdCli.Close()
err = in.Init()
assert.Nil(t, err)
err = in.Start()
assert.Nil(t, err)
err = in.Register()
assert.Nil(t, err)
in.chunkManager = storage.NewLocalChunkManager(storage.RootPath("/tmp/lib/milvus"))
t.Run("CreateIndex FloatVector", func(t *testing.T) {
var insertCodec storage.InsertCodec
insertCodec.Schema = &etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{
{
FieldID: floatVectorFieldID,
Name: floatVectorFieldName,
IsPrimaryKey: false,
DataType: schemapb.DataType_FloatVector,
},
},
},
}
data := make(map[UniqueID]storage.FieldData)
tsData := make([]int64, nb)
for i := 0; i < nb; i++ {
tsData[i] = int64(i + 100)
}
data[tsFieldID] = &storage.Int64FieldData{
NumRows: []int64{nb},
Data: tsData,
}
data[floatVectorFieldID] = &storage.FloatVectorFieldData{
NumRows: []int64{nb},
Data: generateFloatVectors(),
Dim: dim,
}
insertData := storage.InsertData{
Data: data,
Infos: []storage.BlobInfo{
{
Length: 10,
},
},
}
binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
assert.Nil(t, err)
kvs := make(map[string][]byte, len(binLogs))
paths := make([]string, 0, len(binLogs))
for i, blob := range binLogs {
key := path.Join(floatVectorBinlogPath, strconv.Itoa(i))
paths = append(paths, key)
kvs[key] = blob.Value[:]
}
err = in.chunkManager.MultiWrite(kvs)
assert.Nil(t, err)
indexMeta := &indexpb.IndexMeta{
IndexBuildID: indexBuildID1,
State: commonpb.IndexState_InProgress,
IndexVersion: 1,
}
value, err := proto.Marshal(indexMeta)
assert.Nil(t, err)
err = in.etcdKV.Save(metaPath1, string(value))
assert.Nil(t, err)
req := &indexpb.CreateIndexRequest{
IndexBuildID: indexBuildID1,
IndexName: "FloatVector",
IndexID: indexID,
Version: 1,
MetaPath: metaPath1,
DataPaths: paths,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "8",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "index_type",
Value: "IVF_SQ8",
},
{
Key: "params",
Value: "{\"nlist\": 128}",
},
{
Key: "metric_type",
Value: "L2",
},
},
}
status, err2 := in.CreateIndex(ctx, req)
assert.Nil(t, err2)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
strValue, err3 := in.etcdKV.Load(metaPath1)
assert.Nil(t, err3)
indexMetaTmp := indexpb.IndexMeta{}
err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
assert.Nil(t, err)
for indexMetaTmp.State != commonpb.IndexState_Finished {
time.Sleep(100 * time.Millisecond)
strValue, err := in.etcdKV.Load(metaPath1)
assert.Nil(t, err)
err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
assert.Nil(t, err)
}
defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
defer func() {
for k := range kvs {
err = in.chunkManager.Remove(k)
assert.Nil(t, err)
}
}()
defer in.etcdKV.RemoveWithPrefix(metaPath1)
})
t.Run("CreateIndex BinaryVector", func(t *testing.T) {
var insertCodec storage.InsertCodec
insertCodec.Schema = &etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{
{
FieldID: binaryVectorFieldID,
Name: binaryVectorFieldName,
IsPrimaryKey: false,
DataType: schemapb.DataType_BinaryVector,
},
},
},
}
data := make(map[UniqueID]storage.FieldData)
tsData := make([]int64, nb)
for i := 0; i < nb; i++ {
tsData[i] = int64(i + 100)
}
data[tsFieldID] = &storage.Int64FieldData{
NumRows: []int64{nb},
Data: tsData,
}
data[binaryVectorFieldID] = &storage.BinaryVectorFieldData{
NumRows: []int64{nb},
Data: generateBinaryVectors(),
Dim: dim,
}
insertData := storage.InsertData{
Data: data,
Infos: []storage.BlobInfo{
{
Length: 10,
},
},
}
binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
assert.Nil(t, err)
kvs := make(map[string][]byte, len(binLogs))
paths := make([]string, 0, len(binLogs))
for i, blob := range binLogs {
key := path.Join(binaryVectorBinlogPath, strconv.Itoa(i))
paths = append(paths, key)
kvs[key] = blob.Value[:]
}
err = in.chunkManager.MultiWrite(kvs)
assert.Nil(t, err)
indexMeta := &indexpb.IndexMeta{
IndexBuildID: indexBuildID2,
State: commonpb.IndexState_InProgress,
IndexVersion: 1,
}
value, err := proto.Marshal(indexMeta)
assert.Nil(t, err)
err = in.etcdKV.Save(metaPath2, string(value))
assert.Nil(t, err)
req := &indexpb.CreateIndexRequest{
IndexBuildID: indexBuildID2,
IndexName: "BinaryVector",
IndexID: indexID,
Version: 1,
MetaPath: metaPath2,
DataPaths: paths,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "8",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "index_type",
Value: "BIN_FLAT",
},
{
Key: "metric_type",
Value: "JACCARD",
},
},
}
status, err2 := in.CreateIndex(ctx, req)
assert.Nil(t, err2)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
strValue, err3 := in.etcdKV.Load(metaPath2)
assert.Nil(t, err3)
indexMetaTmp := indexpb.IndexMeta{}
err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
assert.Nil(t, err)
for indexMetaTmp.State != commonpb.IndexState_Finished {
time.Sleep(100 * time.Millisecond)
strValue, err = in.etcdKV.Load(metaPath2)
assert.Nil(t, err)
err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
assert.Nil(t, err)
}
defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
defer func() {
for k := range kvs {
err = in.chunkManager.Remove(k)
assert.Nil(t, err)
}
}()
defer in.etcdKV.RemoveWithPrefix(metaPath2)
})
t.Run("Create DeletedIndex", func(t *testing.T) {
var insertCodec storage.InsertCodec
insertCodec.Schema = &etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schemapb.CollectionSchema{
Fields: []*schemapb.FieldSchema{
{
FieldID: floatVectorFieldID,
Name: floatVectorFieldName,
IsPrimaryKey: false,
DataType: schemapb.DataType_FloatVector,
},
},
},
}
data := make(map[UniqueID]storage.FieldData)
tsData := make([]int64, nb)
for i := 0; i < nb; i++ {
tsData[i] = int64(i + 100)
}
data[tsFieldID] = &storage.Int64FieldData{
NumRows: []int64{nb},
Data: tsData,
}
data[floatVectorFieldID] = &storage.FloatVectorFieldData{
NumRows: []int64{nb},
Data: generateFloatVectors(),
Dim: dim,
}
insertData := storage.InsertData{
Data: data,
Infos: []storage.BlobInfo{
{
Length: 10,
},
},
}
binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
assert.Nil(t, err)
kvs := make(map[string][]byte, len(binLogs))
paths := make([]string, 0, len(binLogs))
for i, blob := range binLogs {
key := path.Join(floatVectorBinlogPath, strconv.Itoa(i))
paths = append(paths, key)
kvs[key] = blob.Value[:]
}
err = in.chunkManager.MultiWrite(kvs)
assert.Nil(t, err)
indexMeta := &indexpb.IndexMeta{
IndexBuildID: indexBuildID1,
State: commonpb.IndexState_InProgress,
IndexVersion: 1,
MarkDeleted: true,
}
value, err := proto.Marshal(indexMeta)
assert.Nil(t, err)
err = in.etcdKV.Save(metaPath3, string(value))
assert.Nil(t, err)
req := &indexpb.CreateIndexRequest{
IndexBuildID: indexBuildID1,
IndexName: "FloatVector",
IndexID: indexID,
Version: 1,
MetaPath: metaPath3,
DataPaths: paths,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "8",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "index_type",
Value: "IVF_SQ8",
},
{
Key: "params",
Value: "{\"nlist\": 128}",
},
{
Key: "metric_type",
Value: "L2",
},
},
}
status, err2 := in.CreateIndex(ctx, req)
assert.Nil(t, err2)
assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
time.Sleep(100 * time.Millisecond)
strValue, err3 := in.etcdKV.Load(metaPath3)
assert.Nil(t, err3)
indexMetaTmp := indexpb.IndexMeta{}
err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
assert.Nil(t, err)
assert.Equal(t, true, indexMetaTmp.MarkDeleted)
assert.Equal(t, int64(1), indexMetaTmp.IndexVersion)
//for indexMetaTmp.State != commonpb.IndexState_Finished {
// time.Sleep(100 * time.Millisecond)
// strValue, err := in.etcdKV.Load(metaPath3)
// assert.Nil(t, err)
// err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
// assert.Nil(t, err)
//}
defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
defer func() {
for k := range kvs {
err = in.chunkManager.Remove(k)
assert.Nil(t, err)
}
}()
defer in.etcdKV.RemoveWithPrefix(metaPath3)
})
t.Run("GetComponentStates", func(t *testing.T) {
resp, err := in.GetComponentStates(ctx)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, internalpb.StateCode_Healthy, resp.State.StateCode)
})
t.Run("GetTimeTickChannel", func(t *testing.T) {
resp, err := in.GetTimeTickChannel(ctx)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("GetStatisticsChannel", func(t *testing.T) {
resp, err := in.GetStatisticsChannel(ctx)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
})
t.Run("GetMetrics_system_info", func(t *testing.T) {
req, err := metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
assert.Nil(t, err)
resp, err := in.GetMetrics(ctx, req)
assert.Nil(t, err)
log.Info("GetMetrics_system_info",
zap.String("resp", resp.Response),
zap.String("name", resp.ComponentName))
})
err = in.etcdKV.RemoveWithPrefix("session/IndexNode")
assert.Nil(t, err)
err = in.Stop()
assert.Nil(t, err)
}
// TestCreateIndexFailed drives CreateIndex with invalid requests and checks
// that the persisted IndexMeta ends in IndexState_Failed, and that a node
// that is not Healthy rejects the request outright.
func TestCreateIndexFailed(t *testing.T) {
	ctx := context.Background()
	// Fixed IDs/paths used by the subtests below.
	indexID := UniqueID(1001)
	indexBuildID1 := UniqueID(54322)
	indexBuildID2 := UniqueID(54323)
	floatVectorFieldID := UniqueID(102)
	tsFieldID := UniqueID(1)
	collectionID := UniqueID(202)
	floatVectorFieldName := "float_vector"
	metaPath1 := "FloatVector1"
	metaPath2 := "FloatVector2"
	floatVectorBinlogPath := "float_vector_binlog"
	// Bring up a real IndexNode backed by etcd and a local chunk manager.
	factory := dependency.NewDefaultFactory(true)
	in, err := NewIndexNode(ctx, factory)
	assert.Nil(t, err)
	Params.Init()
	etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
	assert.NoError(t, err)
	in.SetEtcdClient(etcdCli)
	defer etcdCli.Close()
	err = in.Init()
	assert.Nil(t, err)
	err = in.Start()
	assert.Nil(t, err)
	err = in.Register()
	assert.Nil(t, err)
	in.chunkManager = storage.NewLocalChunkManager(storage.RootPath("/tmp/lib/milvus"))
	t.Run("CreateIndex error", func(t *testing.T) {
		// Serialize one float-vector segment so the task has real binlogs to load.
		var insertCodec storage.InsertCodec
		insertCodec.Schema = &etcdpb.CollectionMeta{
			ID: collectionID,
			Schema: &schemapb.CollectionSchema{
				Fields: []*schemapb.FieldSchema{
					{
						FieldID:      floatVectorFieldID,
						Name:         floatVectorFieldName,
						IsPrimaryKey: false,
						DataType:     schemapb.DataType_FloatVector,
					},
				},
			},
		}
		data := make(map[UniqueID]storage.FieldData)
		tsData := make([]int64, nb)
		for i := 0; i < nb; i++ {
			tsData[i] = int64(i + 100)
		}
		data[tsFieldID] = &storage.Int64FieldData{
			NumRows: []int64{nb},
			Data:    tsData,
		}
		data[floatVectorFieldID] = &storage.FloatVectorFieldData{
			NumRows: []int64{nb},
			Data:    generateFloatVectors(),
			Dim:     dim,
		}
		insertData := storage.InsertData{
			Data: data,
			Infos: []storage.BlobInfo{
				{
					Length: 10,
				},
			},
		}
		binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
		assert.Nil(t, err)
		kvs := make(map[string][]byte, len(binLogs))
		paths := make([]string, 0, len(binLogs))
		for i, blob := range binLogs {
			key := path.Join(floatVectorBinlogPath, strconv.Itoa(i))
			paths = append(paths, key)
			kvs[key] = blob.Value[:]
		}
		err = in.chunkManager.MultiWrite(kvs)
		assert.Nil(t, err)
		// Seed the index meta in etcd as InProgress before submitting the request.
		indexMeta := &indexpb.IndexMeta{
			IndexBuildID: indexBuildID1,
			State:        commonpb.IndexState_InProgress,
			IndexVersion: 1,
		}
		value, err := proto.Marshal(indexMeta)
		assert.Nil(t, err)
		err = in.etcdKV.Save(metaPath1, string(value))
		assert.Nil(t, err)
		req := &indexpb.CreateIndexRequest{
			IndexBuildID: indexBuildID1,
			IndexName:    "FloatVector",
			IndexID:      indexID,
			Version:      1,
			MetaPath:     metaPath1,
			DataPaths:    paths,
			// The duplicated "dim" type param is the deliberate defect that
			// should make the build fail.
			TypeParams: []*commonpb.KeyValuePair{
				{
					Key:   "dim",
					Value: "8",
				},
				{
					Key:   "dim",
					Value: "8",
				},
			},
			IndexParams: []*commonpb.KeyValuePair{
				{
					Key:   "index_type",
					Value: "IVF_SQ8",
				},
				{
					Key:   "params",
					Value: "{\"nlist\": 128}",
				},
				{
					Key:   "metric_type",
					Value: "L2",
				},
			},
		}
		// CreateIndex only enqueues; it succeeds even though the build will fail.
		status, err2 := in.CreateIndex(ctx, req)
		assert.Nil(t, err2)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		strValue, err3 := in.etcdKV.Load(metaPath1)
		assert.Nil(t, err3)
		indexMetaTmp := indexpb.IndexMeta{}
		err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
		assert.Nil(t, err)
		// Poll etcd until the task reaches the Failed state.
		// NOTE(review): loops forever if the task never reaches Failed —
		// consider bounding the wait with a deadline.
		for indexMetaTmp.State != commonpb.IndexState_Failed {
			time.Sleep(100 * time.Millisecond)
			strValue, err = in.etcdKV.Load(metaPath1)
			assert.Nil(t, err)
			err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
			assert.Nil(t, err)
		}
		// Best-effort cleanup of generated files and binlogs.
		defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
		defer func() {
			for k := range kvs {
				err = in.chunkManager.Remove(k)
				assert.Nil(t, err)
			}
		}()
	})
	t.Run("Invalid Param", func(t *testing.T) {
		// Same setup as above, but the defect is a bogus "params" type param.
		var insertCodec storage.InsertCodec
		insertCodec.Schema = &etcdpb.CollectionMeta{
			ID: collectionID,
			Schema: &schemapb.CollectionSchema{
				Fields: []*schemapb.FieldSchema{
					{
						FieldID:      floatVectorFieldID,
						Name:         floatVectorFieldName,
						IsPrimaryKey: false,
						DataType:     schemapb.DataType_FloatVector,
					},
				},
			},
		}
		data := make(map[UniqueID]storage.FieldData)
		tsData := make([]int64, nb)
		for i := 0; i < nb; i++ {
			tsData[i] = int64(i + 100)
		}
		data[tsFieldID] = &storage.Int64FieldData{
			NumRows: []int64{nb},
			Data:    tsData,
		}
		data[floatVectorFieldID] = &storage.FloatVectorFieldData{
			NumRows: []int64{nb},
			Data:    generateFloatVectors(),
			Dim:     dim,
		}
		insertData := storage.InsertData{
			Data: data,
			Infos: []storage.BlobInfo{
				{
					Length: 10,
				},
			},
		}
		binLogs, _, err := insertCodec.Serialize(999, 888, &insertData)
		assert.Nil(t, err)
		kvs := make(map[string][]byte, len(binLogs))
		paths := make([]string, 0, len(binLogs))
		for i, blob := range binLogs {
			key := path.Join(floatVectorBinlogPath, strconv.Itoa(i))
			paths = append(paths, key)
			kvs[key] = blob.Value[:]
		}
		err = in.chunkManager.MultiWrite(kvs)
		assert.Nil(t, err)
		indexMeta2 := &indexpb.IndexMeta{
			IndexBuildID: indexBuildID2,
			State:        commonpb.IndexState_InProgress,
			IndexVersion: 1,
		}
		value2, err := proto.Marshal(indexMeta2)
		assert.Nil(t, err)
		err = in.etcdKV.Save(metaPath2, string(value2))
		assert.Nil(t, err)
		req2 := &indexpb.CreateIndexRequest{
			IndexBuildID: indexBuildID2,
			IndexName:    "FloatVector",
			IndexID:      indexID,
			Version:      1,
			MetaPath:     metaPath2,
			DataPaths:    paths,
			TypeParams: []*commonpb.KeyValuePair{
				{
					Key:   "dim",
					Value: "8",
				},
				{
					// Invalid: "params" is not valid JSON here.
					Key:   "params",
					Value: "value",
				},
			},
			IndexParams: []*commonpb.KeyValuePair{
				{
					Key:   "index_type",
					Value: "IVF_SQ8",
				},
				{
					Key:   "params",
					Value: "{\"nlist\": 128}",
				},
				{
					Key:   "metric_type",
					Value: "L2",
				},
			},
		}
		status, err2 := in.CreateIndex(ctx, req2)
		assert.Nil(t, err2)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		strValue, err3 := in.etcdKV.Load(metaPath2)
		assert.Nil(t, err3)
		indexMetaTmp := indexpb.IndexMeta{}
		err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
		assert.Nil(t, err)
		// Poll until Failed (same unbounded-wait caveat as above).
		for indexMetaTmp.State != commonpb.IndexState_Failed {
			time.Sleep(100 * time.Millisecond)
			strValue, err = in.etcdKV.Load(metaPath2)
			assert.Nil(t, err)
			err = proto.Unmarshal([]byte(strValue), &indexMetaTmp)
			assert.Nil(t, err)
		}
		defer in.chunkManager.MultiRemove(indexMetaTmp.IndexFilePaths)
		defer func() {
			for k := range kvs {
				err = in.chunkManager.Remove(k)
				assert.Nil(t, err)
			}
		}()
	})
	t.Run("CreateIndex server not healthy", func(t *testing.T) {
		// A non-Healthy node must reject CreateIndex with UnexpectedError.
		in.UpdateStateCode(internalpb.StateCode_Initializing)
		status, err := in.CreateIndex(ctx, &indexpb.CreateIndexRequest{})
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
	})
	// Drop this node's session and shut it down.
	err = in.etcdKV.RemoveWithPrefix("session/IndexNode")
	assert.Nil(t, err)
	err = in.Stop()
	assert.Nil(t, err)
}
// TestIndexNode_Error verifies that RPC entry points return
// ErrorCode_UnexpectedError both when the node is not Healthy and when a
// request is malformed or asks for an unsupported metric type.
func TestIndexNode_Error(t *testing.T) {
	ctx := context.Background()
	factory := dependency.NewDefaultFactory(true)
	in, err := NewIndexNode(ctx, factory)
	assert.Nil(t, err)
	Params.Init()
	etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
	assert.NoError(t, err)
	in.SetEtcdClient(etcdCli)
	defer etcdCli.Close()
	err = in.Init()
	assert.Nil(t, err)
	err = in.Start()
	assert.Nil(t, err)
	err = in.Register()
	assert.Nil(t, err)
	// Force the node into a non-serving state for the first two subtests.
	in.UpdateStateCode(internalpb.StateCode_Initializing)
	in.chunkManager = storage.NewLocalChunkManager(storage.RootPath("/tmp/lib/milvus"))
	t.Run("CreateIndex", func(t *testing.T) {
		status, err := in.CreateIndex(ctx, &indexpb.CreateIndexRequest{})
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
	})
	t.Run("GetMetrics", func(t *testing.T) {
		resp, err := in.GetMetrics(ctx, &milvuspb.GetMetricsRequest{})
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
	})
	// Healthy node, but the requests themselves are invalid.
	in.UpdateStateCode(internalpb.StateCode_Healthy)
	t.Run("Request Illegal", func(t *testing.T) {
		// Empty metrics request: no metric type specified.
		resp, err := in.GetMetrics(ctx, &milvuspb.GetMetricsRequest{})
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
	})
	t.Run("MetricsTypeIllegal", func(t *testing.T) {
		// Well-formed request, but an unrecognized metric type.
		req, err := metricsinfo.ConstructRequestByMetricType("GetIndexNodeMetrics")
		assert.Nil(t, err)
		resp, err := in.GetMetrics(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
	})
	err = in.etcdKV.RemoveWithPrefix("session/IndexNode")
	assert.Nil(t, err)
	err = in.Stop()
	assert.Nil(t, err)
}
func TestIndexNode_InitError(t *testing.T) {
ctx := context.Background()
in := &IndexNode{
sched: &TaskScheduler{
IndexBuildQueue: &IndexBuildTaskQueue{
BaseTaskQueue: BaseTaskQueue{
unissuedTasks: list.New(),
activeTasks: make(map[UniqueID]task),
maxTaskNum: 0,
utBufChan: make(chan int, 1024),
},
},
},
in.SetEtcdClient(getEtcdClient())
assert.Nil(t, in.initSession())
assert.Nil(t, in.Register())
key := in.session.ServerName
if !in.session.Exclusive {
key = fmt.Sprintf("%s-%d", key, in.session.ServerID)
}
in.UpdateStateCode(internalpb.StateCode_Healthy)
resp, err := getEtcdClient().Get(ctx, path.Join(Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot, key))
assert.Nil(t, err)
assert.Equal(t, int64(1), resp.Count)
sess := &sessionutil.Session{}
assert.Nil(t, json.Unmarshal(resp.Kvs[0].Value, sess))
assert.Equal(t, sess.ServerID, in.session.ServerID)
assert.Equal(t, sess.Address, in.session.Address)
assert.Equal(t, sess.ServerName, in.session.ServerName)
t.Run("CreateIndex", func(t *testing.T) {
status, err := in.CreateIndex(ctx, &indexpb.CreateIndexRequest{})
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
})
// revoke lease
in.session.Revoke(time.Second)
resp, err = getEtcdClient().Get(ctx, path.Join(Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot, in.session.ServerName))
assert.Nil(t, err)
assert.Equal(t, resp.Count, int64(0))
}
func TestIndexNode_GetComponentStates(t *testing.T) {
n := &IndexNode{}
n.stateCode.Store(internalpb.StateCode_Healthy)
resp, err := n.GetComponentStates(context.Background())
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, common.NotRegisteredID, resp.State.NodeID)
n.session = &sessionutil.Session{}
n.session.UpdateRegistered(true)
resp, err = n.GetComponentStates(context.Background())
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
// TestComponentState walks the node through its lifecycle and checks the
// state code reported by GetComponentStates at each step:
// new -> Abnormal, Init -> Initializing, Start -> Healthy, Stop -> Abnormal.
func TestComponentState(t *testing.T) {
	var (
		factory = &mockFactory{
			chunkMgr: &mockChunkmgr{},
		}
		ctx = context.TODO()
	)
	Params.Init()
	in, err := NewIndexNode(ctx, factory)
	assert.Nil(t, err)
	in.SetEtcdClient(getEtcdClient())
	state, err := in.GetComponentStates(ctx)
	assert.Nil(t, err)
	assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
	assert.Equal(t, state.State.StateCode, internalpb.StateCode_Abnormal)
	assert.Nil(t, in.Init())
	state, err = in.GetComponentStates(ctx)
	assert.Nil(t, err)
	assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
	assert.Equal(t, state.State.StateCode, internalpb.StateCode_Initializing)
	assert.Nil(t, in.Start())
	state, err = in.GetComponentStates(ctx)
	assert.Nil(t, err)
	assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
	assert.Equal(t, state.State.StateCode, internalpb.StateCode_Healthy)
	assert.Nil(t, in.Stop())
	state, err = in.GetComponentStates(ctx)
	assert.Nil(t, err)
	assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
	assert.Equal(t, state.State.StateCode, internalpb.StateCode_Abnormal)
}
func TestGetTimeTickChannel(t *testing.T) {
var (
factory = &mockFactory{
chunkMgr: &mockChunkmgr{},
}
ctx = context.TODO()
)
Params.Init()
in, err := NewIndexNode(ctx, factory)
assert.Nil(t, err)
ret, err := in.GetTimeTickChannel(ctx)
assert.Nil(t, err)
assert.Equal(t, ret.Status.ErrorCode, commonpb.ErrorCode_Success)
}
func TestGetStatisticChannel(t *testing.T) {
var (
factory = &mockFactory{
chunkMgr: &mockChunkmgr{},
}
ctx = context.TODO()
)
Params.Init()
in, err := NewIndexNode(ctx, factory)
assert.Nil(t, err)
ret, err := in.GetStatisticsChannel(ctx)
assert.Nil(t, err)
assert.Equal(t, ret.Status.ErrorCode, commonpb.ErrorCode_Success)
}
// TestInitErr is currently disabled: the body below exercised Init()
// failure when the factory cannot provide storage, but it is commented
// out. NOTE(review): either re-enable it or use t.Skip with a reason so
// the dead code does not linger.
func TestInitErr(t *testing.T) {
	// var (
	// 	factory = &mockFactory{}
	// 	ctx     = context.TODO()
	// )
	// in, err := NewIndexNode(ctx, factory)
	// assert.Nil(t, err)
	// in.SetEtcdClient(getEtcdClient())
	// assert.Error(t, in.Init())
}
// setup starts the embedded etcd instance shared by the whole test suite.
func setup() {
	startEmbedEtcd()
}
// teardown stops the embedded etcd instance started by setup.
func teardown() {
	stopEmbedEtcd()
}
// TestMain boots the embedded etcd before running the suite and tears it
// down afterwards, propagating the suite's exit code to the OS.
func TestMain(m *testing.M) {
	setup()
	exitCode := m.Run()
	teardown()
	os.Exit(exitCode)
}

View File

@ -22,284 +22,130 @@ import (
"fmt"
"path"
"runtime"
"runtime/debug"
"strconv"
"time"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/indexnodepb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/indexcgowrapper"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
)
const (
// paramsKeyToParse is the key of the param to build index.
paramsKeyToParse = "params"
)
// IndexBuildTaskName is the name of the operation to add an index task.
IndexBuildTaskName = "IndexBuildTask"
var (
cancelErr = fmt.Errorf("cancelled")
)
type Blob = storage.Blob
// taskInfo holds the node-side bookkeeping for one index-build task.
type taskInfo struct {
	cancel     context.CancelFunc  // cancels the task's context
	state      commonpb.IndexState // last reported build state
	indexfiles []string            // paths of the produced index files
	// task statistics
	statistic *indexnodepb.JobInfo
}
// task is the contract a unit of work must satisfy to be driven by the
// task scheduler's queue (enqueue, execute phases, completion signaling).
type task interface {
	Ctx() context.Context
	ID() UniqueID // return ReqID
	Name() string
	SetID(uid UniqueID) // set ReqID
	PreExecute(ctx context.Context) error
	Execute(ctx context.Context) error
	PostExecute(ctx context.Context) error
	WaitToFinish() error  // block until Notify is called
	Notify(err error)     // deliver the final result to WaitToFinish
	OnEnqueue() error     // hook invoked when the task is admitted
	SetError(err error)
	SetState(state TaskState)
	GetState() TaskState
}
// BaseTask is a basic instance of task, embedding the common state and
// completion plumbing shared by concrete task implementations.
type BaseTask struct {
	done        chan error // carries the final error from Notify to WaitToFinish
	ctx         context.Context
	id          UniqueID
	err         error
	internalErr error // retryable internal error, kept separate from err
	state       TaskState
}
// SetState sets task's state. The write is a plain field assignment with
// no synchronization visible here.
func (bt *BaseTask) SetState(state TaskState) {
	bt.state = state
}
// GetState gets task's state as last written by SetState.
func (bt *BaseTask) GetState() TaskState {
	return bt.state
}
// SetError sets an error to task; this is the user-visible failure, as
// opposed to internalErr which tracks retryable problems.
func (bt *BaseTask) SetError(err error) {
	bt.err = err
}
// ID returns the id of index task (the request ID assigned via setID).
func (bt *BaseTask) ID() UniqueID {
	return bt.id
}
// setID set the ID for the task (unexported; used by SetID wrappers).
func (bt *BaseTask) setID(id UniqueID) {
	bt.id = id
}
// WaitToFinish will wait for the task to complete, if the context is done,
// it means that the execution of the task has timed out. Returns whatever
// error the task delivered through Notify.
func (bt *BaseTask) WaitToFinish() error {
	select {
	case <-bt.ctx.Done():
		return errors.New("timeout")
	case err := <-bt.done:
		return err
	}
}
// Notify will notify WaitToFinish that the task is completed or failed.
func (bt *BaseTask) Notify(err error) {
bt.done <- err
Prepare(context.Context) error
LoadData(context.Context) error
BuildIndex(context.Context) error
SaveIndexFiles(context.Context) error
OnEnqueue(context.Context) error
SetState(state commonpb.IndexState)
GetState() commonpb.IndexState
Reset()
}
// IndexBuildTask is used to record the information of the index tasks.
type IndexBuildTask struct {
BaseTask
type indexBuildTask struct {
ident string
cancel context.CancelFunc
ctx context.Context
cm storage.ChunkManager
index indexcgowrapper.CodecIndex
etcdKV kv.MetaKv
savePaths []string
req *indexpb.CreateIndexRequest
req *indexnodepb.CreateJobRequest
BuildID UniqueID
nodeID UniqueID
serializedSize uint64
ClusterID UniqueID
collectionID UniqueID
partitionID UniqueID
segmentID UniqueID
fieldID UniqueID
fieldData storage.FieldData
indexBlobs []*storage.Blob
newTypeParams map[string]string
newIndexParams map[string]string
serializedSize uint64
tr *timerecord.TimeRecorder
statistic indexnodepb.JobInfo
node *IndexNode
}
// Reset drops all references held by the task so a pooled/reused instance
// does not retain memory (field data, index blobs, node pointer) from the
// previous build.
func (it *indexBuildTask) Reset() {
	it.ident = ""
	it.cancel = nil
	it.ctx = nil
	it.cm = nil
	it.index = nil
	it.savePaths = nil
	it.req = nil
	it.fieldData = nil
	it.indexBlobs = nil
	it.newTypeParams = nil
	it.newIndexParams = nil
	it.tr = nil
	it.node = nil
}
// Ctx is the context of index tasks.
func (it *IndexBuildTask) Ctx() context.Context {
// Ctx returns the context bound to this index-build task.
func (it *indexBuildTask) Ctx() context.Context {
	return it.ctx
}
// ID returns the id of index task.
func (it *IndexBuildTask) ID() UniqueID {
return it.id
}
// SetID sets the id for index task.
func (it *IndexBuildTask) SetID(ID UniqueID) {
it.BaseTask.setID(ID)
}
// Name is the name of task to build index.
func (bt *BaseTask) Name() string {
return IndexBuildTaskName
// Name returns the task's identity string (it.ident).
func (it *indexBuildTask) Name() string {
	return it.ident
}
// SetState stores the task state in the owning IndexNode, keyed by
// (ClusterID, BuildID).
func (it *indexBuildTask) SetState(state commonpb.IndexState) {
	it.node.storeTaskState(it.ClusterID, it.BuildID, state)
}
// GetState reads the task state back from the owning IndexNode; if the
// task is no longer tracked there, IndexState_IndexStateNone is returned.
func (it *indexBuildTask) GetState() commonpb.IndexState {
	if state, ok := it.node.loadTaskState(it.ClusterID, it.BuildID); ok {
		return state
	}
	return commonpb.IndexState_IndexStateNone
}
// OnEnqueue enqueues indexing tasks.
func (it *IndexBuildTask) OnEnqueue() error {
it.SetID(it.req.IndexBuildID)
it.SetState(TaskStateNormal)
log.Debug("IndexNode IndexBuilderTask Enqueue", zap.Int64("taskID", it.ID()), zap.Int64("index buildID", it.req.IndexBuildID))
it.tr = timerecord.NewTimeRecorder(fmt.Sprintf("IndexBuildTask %d", it.req.IndexBuildID))
// OnEnqueue records the admission time and owning pod ID in the task
// statistics when the scheduler accepts the task.
func (it *indexBuildTask) OnEnqueue(ctx context.Context) error {
	it.statistic.StartTime = time.Now().UnixMicro()
	it.statistic.PodID = it.node.GetNodeID()
	logutil.Logger(ctx).Debug("IndexNode IndexBuilderTask Enqueue")
	return nil
}
// loadIndexMeta load meta from etcd. It returns the unmarshalled IndexMeta
// together with the etcd revision it was read at (used later for
// compare-and-swap), retrying up to 3 times. An empty read marks the task
// as abandoned rather than failing.
func (it *IndexBuildTask) loadIndexMeta(ctx context.Context) (*indexpb.IndexMeta, int64, error) {
	indexMeta := &indexpb.IndexMeta{}
	var source int64
	fn := func() error {
		//TODO error handling need to be optimized, return Unrecoverable to avoid retry
		_, values, versions, err := it.etcdKV.LoadWithPrefix2(it.req.MetaPath)
		if err != nil {
			return err
		}
		if len(values) == 0 {
			// Meta gone: the task was recycled elsewhere; abandon instead of erroring.
			log.Warn("IndexNode loadIndexMeta get empty, maybe the task has been recycled, set task to abandon",
				zap.Int64("buildID", it.req.IndexBuildID))
			it.SetState(TaskStateAbandon)
			return nil
		}
		err = proto.Unmarshal([]byte(values[0]), indexMeta)
		if err != nil {
			return err
		}
		source = versions[0]
		return nil
	}
	err := retry.Do(ctx, fn, retry.Attempts(3))
	if err != nil {
		return nil, -1, err
	}
	return indexMeta, source, nil
}
// updateTaskState reconciles the task's state with the freshly loaded etcd
// meta: an internal error marks the task for retry; a newer index version,
// an already-finished build, or a deletion mark abandon it. An already
// abandoned task is left untouched. Returns the resulting state.
func (it *IndexBuildTask) updateTaskState(indexMeta *indexpb.IndexMeta, err error) TaskState {
	if it.GetState() == TaskStateAbandon {
		return it.GetState()
	}
	if err != nil {
		log.Warn("IndexNode IndexBuildTask internal err, mark the task as retry", zap.Int64("buildID", it.req.IndexBuildID), zap.Error(err))
		it.SetState(TaskStateRetry)
	} else if indexMeta.IndexVersion > it.req.Version || indexMeta.State == commonpb.IndexState_Finished {
		// Someone else built a newer version or already finished this one.
		it.SetState(TaskStateAbandon)
	} else if indexMeta.MarkDeleted {
		it.SetState(TaskStateAbandon)
	}
	return it.GetState()
}
// saveIndexMeta try to save index meta to metaKV.
// if failed, IndexNode will panic to inform indexcoord.
// The meta is reloaded, the state mapped (Failed/Unissued/Finished), and
// written back with a compare-and-swap on the revision returned by
// loadIndexMeta; the whole sequence is retried up to 3 times.
func (it *IndexBuildTask) saveIndexMeta(ctx context.Context) error {
	defer it.tr.Record("IndexNode IndexBuildTask saveIndexMeta")
	fn := func() error {
		indexMeta, version, err := it.loadIndexMeta(ctx)
		if err != nil {
			log.Error("IndexNode IndexBuildTask saveIndexMeta fail to load index meta,", zap.Int64("build Id", it.req.IndexBuildID), zap.Error(err))
			return err
		}
		taskState := it.updateTaskState(indexMeta, it.internalErr)
		if taskState == TaskStateAbandon {
			// Nothing to persist: the task result is no longer wanted.
			log.Warn("IndexNode IndexBuildTask saveIndexMeta success because task abandon", zap.String("TaskState", taskState.String()),
				zap.Int64("IndexBuildID", indexMeta.IndexBuildID))
			return nil
		}
		if taskState == TaskStateFailed {
			log.Error("IndexNode IndexBuildTask saveIndexMeta set indexMeta.state to IndexState_Failed",
				zap.String("TaskState", taskState.String()),
				zap.Int64("IndexBuildID", indexMeta.IndexBuildID), zap.Error(it.err))
			indexMeta.State = commonpb.IndexState_Failed
			indexMeta.FailReason = it.err.Error()
		} else if taskState == TaskStateRetry {
			// Retryable: hand the build back to the coordinator as Unissued.
			log.Info("IndexNode IndexBuildTask saveIndexMeta set indexMeta.state to IndexState_Unissued",
				zap.String("TaskState", taskState.String()),
				zap.Int64("IndexBuildID", indexMeta.IndexBuildID), zap.Error(it.internalErr))
			indexMeta.State = commonpb.IndexState_Unissued
		} else { // TaskStateNormal
			indexMeta.IndexFilePaths = it.savePaths
			indexMeta.SerializeSize = it.serializedSize
			log.Info("IndexNode IndexBuildTask saveIndexMeta indexMeta.state to IndexState_Finished",
				zap.String("TaskState", taskState.String()),
				zap.Int64("IndexBuildID", indexMeta.IndexBuildID))
			indexMeta.State = commonpb.IndexState_Finished
		}
		var metaValue []byte
		metaValue, err = proto.Marshal(indexMeta)
		if err != nil {
			log.Warn("IndexNode IndexBuildTask saveIndexMeta fail to marshal index meta,", zap.Int64("build Id", indexMeta.IndexBuildID), zap.Error(err))
			return err
		}
		strMetaValue := string(metaValue)
		// CAS on the revision read in loadIndexMeta: lose the race -> retry.
		success, err := it.etcdKV.CompareVersionAndSwap(it.req.MetaPath, version, strMetaValue)
		if err != nil {
			// TODO, we don't need to reload if it is just etcd error
			log.Warn("failed to compare and swap in etcd", zap.Int64("buildID", it.req.IndexBuildID), zap.Error(err))
			return err
		}
		if !success {
			return fmt.Errorf("failed to save index meta in etcd, buildId: %d, source version: %d", it.req.IndexBuildID, version)
		}
		return nil
	}
	err := retry.Do(ctx, fn, retry.Attempts(3))
	if err != nil {
		// Deliberate: an unsaveable meta is unrecoverable; crash so that
		// indexcoord notices and reschedules.
		panic(err.Error())
	}
	return nil
}
// PreExecute does some checks before building the index, for example, whether the index has been deleted.
// A failed meta load is tolerated here (returns nil) on the assumption that
// loadIndexMeta will be retried later in the pipeline.
func (it *IndexBuildTask) PreExecute(ctx context.Context) error {
	log.Debug("IndexNode IndexBuildTask preExecute...", zap.Int64("buildId", it.req.IndexBuildID))
	sp, ctx := trace.StartSpanFromContextWithOperationName(ctx, "CreateIndex-PreExecute")
	defer sp.Finish()
	indexMeta, _, err := it.loadIndexMeta(ctx)
	if err != nil {
		// assume that we can loadIndexMeta later...
		return nil
	}
	it.updateTaskState(indexMeta, nil)
	return nil
}
// PostExecute does some checks after building the index, for example, whether the index has been deleted or
// whether the index task is up to date. It then persists the final state
// via saveIndexMeta (which panics if the meta cannot be saved).
func (it *IndexBuildTask) PostExecute(ctx context.Context) error {
	log.Debug("IndexNode IndexBuildTask PostExecute...", zap.Int64("buildId", it.req.IndexBuildID))
	sp, _ := trace.StartSpanFromContextWithOperationName(ctx, "CreateIndex-PostExecute")
	defer sp.Finish()
	return it.saveIndexMeta(ctx)
}
func (it *IndexBuildTask) prepareParams(ctx context.Context) error {
func (it *indexBuildTask) Prepare(ctx context.Context) error {
typeParams := make(map[string]string)
for _, kvPair := range it.req.GetTypeParams() {
key, value := kvPair.GetKey(), kvPair.GetValue()
@ -341,10 +187,32 @@ func (it *IndexBuildTask) prepareParams(ctx context.Context) error {
}
it.newTypeParams = typeParams
it.newIndexParams = indexParams
it.statistic.IndexParams = it.req.GetIndexParams()
// ugly codes to get dimension
if dimStr, ok := typeParams["dim"]; ok {
var err error
it.statistic.Dim, err = strconv.ParseInt(dimStr, 10, 64)
if err != nil {
logutil.Logger(ctx).Error("parse dimesion failed", zap.Error(err))
// ignore error
}
}
// setup chunkmanager
// opts := make([]storage.Option, 0)
// // TODO: secret access key_id
// opts = append(opts, storage.AccessKeyID(it.req.StorageAccessKey))
// opts = append(opts, storage.BucketName(it.req.BucketName))
// factory := storage.NewChunkManagerFactory("local", "minio", opts...)
// var err error
// it.cm, err = factory.NewVectorStorageChunkManager(ctx)
// if err != nil {
// logutil.Logger(ctx).Error("init chunk manager failed", zap.Error(err), zap.String("BucketName", it.req.BucketName), zap.String("StorageAccessKey", it.req.StorageAccessKey))
// return err
// }
return nil
}
func (it *IndexBuildTask) loadFieldData(ctx context.Context) (storage.FieldID, storage.FieldData, error) {
func (it *indexBuildTask) LoadData(ctx context.Context) error {
getValueByPath := func(path string) ([]byte, error) {
data, err := it.cm.Read(path)
if err != nil {
@ -384,34 +252,139 @@ func (it *IndexBuildTask) loadFieldData(ctx context.Context) (storage.FieldID, s
// gomaxproc will be set by `automaxproc`, passing 0 will just retrieve the value
err := funcutil.ProcessFuncParallel(len(toLoadDataPaths), runtime.GOMAXPROCS(0), loadKey, "loadKey")
if err != nil {
log.Warn("loadKey from minio failed", zap.Error(err))
it.internalErr = err
// In this case, it.internalErr is no longer nil and err does not need to be returned, otherwise it.err will also be assigned.
return storage.InvalidUniqueID, nil, err
logutil.Logger(it.ctx).Warn("loadKey failed", zap.Error(err))
return err
}
loadVectorDuration := it.tr.RecordSpan()
log.Debug("IndexNode load data success", zap.Int64("buildId", it.req.IndexBuildID))
loadVectorDuration := it.tr.RecordSpan().Milliseconds()
logutil.Logger(ctx).Debug("indexnode load data success")
it.tr.Record("load field data done")
metrics.IndexNodeLoadFieldLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(loadVectorDuration))
return it.decodeBlobs(ctx, blobs)
}
// BuildIndex runs the CGO index build over the previously loaded field data,
// serializes the resulting index, releases the in-memory index early for GC,
// and binlog-encodes the serialized blobs into it.indexBlobs for the
// subsequent SaveIndexFiles stage.
func (it *indexBuildTask) BuildIndex(ctx context.Context) error {
	dataset := indexcgowrapper.GenDataset(it.fieldData)
	dType := dataset.DType
	var err error
	if dType != schemapb.DataType_None {
		it.index, err = indexcgowrapper.NewCgoIndex(dType, it.newTypeParams, it.newIndexParams)
		if err != nil {
			logutil.Logger(ctx).Error("failed to create index", zap.Error(err))
			return err
		}
		err = it.index.Build(dataset)
		if err != nil {
			logutil.Logger(ctx).Error("failed to build index", zap.Error(err))
			return err
		}
	}
	// NOTE(review): if dType == DataType_None, it.index is never assigned in
	// this function and the Serialize call below would hit a nil index —
	// confirm upstream stages guarantee a concrete data type.
	metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(it.tr.RecordSpan().Milliseconds()))
	it.tr.Record("build index done")
	indexBlobs, err := it.index.Serialize()
	if err != nil {
		logutil.Logger(ctx).Error("IndexNode index Serialize failed", zap.Error(err))
		return err
	}
	it.tr.Record("index serialize done")
	// use serialized size before encoding
	it.serializedSize = 0
	for _, blob := range indexBlobs {
		it.serializedSize += uint64(len(blob.Value))
	}
	// early release index for gc, and we can ensure that Delete is idempotent.
	if err := it.index.Delete(); err != nil {
		logutil.Logger(it.ctx).Error("IndexNode indexBuildTask Execute CIndexDelete failed", zap.Error(err))
	}
	var serializedIndexBlobs []*storage.Blob
	codec := storage.NewIndexFileBinlogCodec()
	serializedIndexBlobs, err = codec.Serialize(
		it.req.BuildID,
		it.req.IndexVersion,
		it.collectionID,
		it.partitionID,
		it.segmentID,
		it.fieldID,
		it.newIndexParams,
		it.req.IndexName,
		it.req.IndexID,
		indexBlobs,
	)
	if err != nil {
		return err
	}
	encodeIndexFileDur := it.tr.Record("index codec serialize done")
	metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
	it.indexBlobs = serializedIndexBlobs
	return nil
}
// SaveIndexFiles persists every serialized index blob to the chunk manager in
// parallel (with retries), records the resulting paths and statistics on the
// node, and emits timing metrics.
func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
	numBlobs := len(it.indexBlobs)
	// Destination layout: <prefix>/<indexID>/<buildID>/<indexVersion>/<key>.
	buildSavePath := func(key string) string {
		return path.Join(it.req.IndexFilePrefix,
			strconv.Itoa(int(it.req.IndexID)),
			strconv.Itoa(int(it.req.BuildID)),
			strconv.Itoa(int(it.req.IndexVersion)),
			key)
	}
	savePaths := make([]string, numBlobs)
	writeBlob := func(i int) error {
		blob := it.indexBlobs[i]
		target := buildSavePath(blob.Key)
		attempt := func() error {
			return it.cm.Write(target, blob.Value)
		}
		if err := retry.Do(ctx, attempt, retry.Attempts(5)); err != nil {
			logutil.Logger(ctx).Warn("index node save index file failed", zap.Error(err), zap.String("savePath", target))
			return err
		}
		savePaths[i] = target
		return nil
	}
	// If an error occurs, return the error that the task state will be set to retry.
	if err := funcutil.ProcessFuncParallel(numBlobs, runtime.NumCPU(), writeBlob, "saveIndexFile"); err != nil {
		logutil.Logger(it.ctx).Error("saveIndexFile fail")
		return err
	}
	it.savePaths = savePaths
	it.statistic.EndTime = time.Now().UnixMicro()
	it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, savePaths, &it.statistic)
	logutil.Logger(ctx).Debug("save index files done", zap.Strings("IndexFiles", savePaths))
	saveIndexFileDur := it.tr.Record("index file save done")
	metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
	it.tr.Elapse("index building all done")
	logutil.Logger(ctx).Info("IndexNode CreateIndex successfully ", zap.Int64("collect", it.collectionID),
		zap.Int64("partition", it.partitionID), zap.Int64("segment", it.segmentID))
	return nil
}
func (it *indexBuildTask) decodeBlobs(ctx context.Context, blobs []*storage.Blob) error {
var insertCodec storage.InsertCodec
collectionID, partitionID, segmentID, insertData, err2 := insertCodec.DeserializeAll(blobs)
if err2 != nil {
return storage.InvalidUniqueID, nil, err2
return err2
}
metrics.IndexNodeDecodeFieldLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(it.tr.RecordSpan()))
decodeDuration := it.tr.RecordSpan().Milliseconds()
metrics.IndexNodeDecodeFieldLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(decodeDuration))
if len(insertData.Data) != 1 {
return storage.InvalidUniqueID, nil, errors.New("we expect only one field in deserialized insert data")
return errors.New("we expect only one field in deserialized insert data")
}
it.collectionID = collectionID
it.partitionID = partitionID
it.segmentID = segmentID
log.Debug("IndexNode deserialize data success",
zap.Int64("taskID", it.ID()),
zap.Int64("IndexID", it.req.IndexID),
zap.Int64("index buildID", it.req.IndexBuildID),
logutil.Logger(ctx).Debug("indexnode deserialize data success",
zap.Int64("index id", it.req.IndexID),
zap.String("index name", it.req.IndexName),
zap.Int64("collectionID", it.collectionID),
zap.Int64("partitionID", it.partitionID),
zap.Int64("segmentID", it.segmentID))
@ -426,184 +399,50 @@ func (it *IndexBuildTask) loadFieldData(ctx context.Context) (storage.FieldID, s
fieldID = fID
break
}
return fieldID, data, nil
}
// buildIndex loads the raw field data, runs the CGO index build over it,
// serializes the index, releases the in-memory index early for GC, and
// returns the binlog-encoded blobs ready to be written by saveIndex.
func (it *IndexBuildTask) buildIndex(ctx context.Context) ([]*storage.Blob, error) {
	var fieldID storage.FieldID
	{
		var err error
		var fieldData storage.FieldData
		fieldID, fieldData, err = it.loadFieldData(ctx)
		if err != nil {
			return nil, err
		}
		dataset := indexcgowrapper.GenDataset(fieldData)
		dType := dataset.DType
		if dType != schemapb.DataType_None {
			it.index, err = indexcgowrapper.NewCgoIndex(dType, it.newTypeParams, it.newIndexParams)
			if err != nil {
				log.Error("failed to create index", zap.Error(err))
				return nil, err
			}
			err = it.index.Build(dataset)
			if err != nil {
				log.Error("failed to build index", zap.Error(err))
				return nil, err
			}
		}
		metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(it.tr.RecordSpan()))
		it.tr.Record("build index done")
	}
	// NOTE(review): when dType == DataType_None, it.index is not assigned above
	// and Serialize below would operate on a nil index — confirm inputs always
	// carry a concrete data type.
	indexBlobs, err := it.index.Serialize()
	if err != nil {
		log.Error("IndexNode index Serialize failed", zap.Error(err))
		return nil, err
	}
	it.tr.Record("index serialize done")
	// use serialized size before encoding
	it.serializedSize = 0
	for _, blob := range indexBlobs {
		it.serializedSize += uint64(len(blob.Value))
	}
	// early release index for gc, and we can ensure that Delete is idempotent.
	if err := it.index.Delete(); err != nil {
		log.Error("IndexNode IndexBuildTask Execute CIndexDelete failed",
			zap.Int64("buildId", it.req.IndexBuildID),
			zap.Error(err))
	}
	var serializedIndexBlobs []*storage.Blob
	codec := storage.NewIndexFileBinlogCodec()
	serializedIndexBlobs, err = codec.Serialize(
		it.req.IndexBuildID,
		it.req.Version,
		it.collectionID,
		it.partitionID,
		it.segmentID,
		fieldID,
		it.newIndexParams,
		it.req.IndexName,
		it.req.IndexID,
		indexBlobs,
	)
	if err != nil {
		return nil, err
	}
	encodeIndexFileDur := it.tr.Record("index codec serialize done")
	metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
	return serializedIndexBlobs, nil
}
func (it *IndexBuildTask) saveIndex(ctx context.Context, blobs []*storage.Blob) error {
blobCnt := len(blobs)
getSavePathByKey := func(key string) string {
return path.Join(Params.IndexNodeCfg.IndexStorageRootPath, strconv.Itoa(int(it.req.IndexBuildID)), strconv.Itoa(int(it.req.Version)),
strconv.Itoa(int(it.partitionID)), strconv.Itoa(int(it.segmentID)), key)
}
savePaths := make([]string, blobCnt)
saveIndexFile := func(idx int) error {
blob := blobs[idx]
savePath := getSavePathByKey(blob.Key)
saveIndexFileFn := func() error {
indexMeta, _, err := it.loadIndexMeta(ctx)
if err != nil {
log.Warn("IndexNode load meta failed", zap.String("path", it.req.MetaPath), zap.Error(err))
return err
}
if it.GetState() != TaskStateNormal {
log.Warn("IndexNode task state is not normal, skip task", zap.Int64("buildID", it.req.IndexBuildID))
return nil
}
if indexMeta.IndexVersion > it.req.Version {
log.Warn("IndexNode try saveIndexFile failed req.Version is low", zap.Any("req.Version", it.req.Version),
zap.Any("indexMeta.Version", indexMeta.IndexVersion))
return errors.New("This task has been reassigned, check indexMeta.version and request ")
}
return it.cm.Write(savePath, blob.Value)
}
err := retry.Do(ctx, saveIndexFileFn, retry.Attempts(5))
if err != nil {
log.Warn("IndexNode try saveIndexFile final", zap.Error(err), zap.Any("savePath", savePath))
return err
}
savePaths[idx] = savePath
return nil
}
// If an error occurs, return the error that the task state will be set to retry.
if err := funcutil.ProcessFuncParallel(blobCnt, runtime.NumCPU(), saveIndexFile, "saveIndexFile"); err != nil {
log.Error("saveIndexFile fail", zap.Int64("buildID", it.req.IndexBuildID))
return err
}
it.savePaths = savePaths
it.statistic.NumRows = int64(data.RowNum())
it.fieldID = fieldID
it.fieldData = data
return nil
}
// releaseMemory asks the runtime to return freed memory to the OS after a
// build, since index construction can transiently allocate large buffers.
func (it *IndexBuildTask) releaseMemory() {
	debug.FreeOSMemory()
}
// Execute actually performs the task of building an index.
// Outcome mapping: non-normal state → no-op; prepareParams error → Failed;
// build error → Failed if the data key is missing (ErrNoSuchKey), Retry
// otherwise; save error → Retry.
func (it *IndexBuildTask) Execute(ctx context.Context) error {
	log.Debug("IndexNode IndexBuildTask Execute ...", zap.Int64("buildId", it.req.IndexBuildID))
	sp, _ := trace.StartSpanFromContextWithOperationName(ctx, "CreateIndex-Execute")
	defer sp.Finish()
	state := it.GetState()
	if state != TaskStateNormal {
		log.Info("index task no need to execute", zap.Int64("buildID", it.req.IndexBuildID),
			zap.String("index state", it.GetState().String()))
		return nil
	}
	if err := it.prepareParams(ctx); err != nil {
		it.SetState(TaskStateFailed)
		log.Error("IndexNode IndexBuildTask Execute prepareParams failed",
			zap.Int64("buildId", it.req.IndexBuildID),
			zap.Error(err))
		return err
	}
	// Give transiently allocated build buffers back to the OS once done.
	defer it.releaseMemory()
	var err error
	var blobs []*storage.Blob
	blobs, err = it.buildIndex(ctx)
	if err != nil {
		// A missing data key cannot be fixed by retrying.
		if errors.Is(err, ErrNoSuchKey) {
			it.SetState(TaskStateFailed)
			log.Error("IndexNode IndexBuildTask Execute buildIndex failed",
				zap.Int64("buildId", it.req.IndexBuildID), zap.Error(err))
			return err
		}
		it.SetState(TaskStateRetry)
		log.Error("IndexNode IndexBuildTask Execute buildIndex failed, need to retry",
			zap.Int64("buildId", it.req.IndexBuildID), zap.Error(err))
		return err
	}
	err = it.saveIndex(ctx, blobs)
	if err != nil {
		it.SetState(TaskStateRetry)
		return err
	}
	saveIndexFileDur := it.tr.Record("index file save done")
	metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
	it.tr.Elapse("index building all done")
	log.Info("IndexNode CreateIndex successfully ", zap.Int64("collect", it.collectionID),
		zap.Int64("partition", it.partitionID), zap.Int64("segment", it.segmentID))
	return nil
}
// func (it *indexBuildTask) Execute(ctx context.Context) error {
// logutil.Logger(it.ctx).Debug("IndexNode indexBuildTask Execute ...")
// sp, _ := trace.StartSpanFromContextWithOperationName(ctx, "CreateIndex-Execute")
// defer sp.Finish()
// select {
// case <-ctx.Done():
// logutil.Logger(it.ctx).Warn("build task was cancelled")
// return cancelErr
// default:
// if err := it.prepareParams(ctx); err != nil {
// it.SetState(commonpb.IndexState_Failed)
// logutil.Logger(it.ctx).Error("IndexNode indexBuildTask Execute prepareParams failed", zap.Error(err))
// return err
// }
// defer it.releaseMemory()
// blobs, err := it.buildIndex(ctx)
// if err != nil {
// if errors.Is(err, ErrNoSuchKey) {
// it.SetState(commonpb.IndexState_Failed)
// logutil.Logger(it.ctx).Error("IndexNode indexBuildTask Execute buildIndex failed", zap.Error(err))
// return err
// }
// it.SetState(commonpb.IndexState_Unissued)
// logutil.Logger(it.ctx).Error("IndexNode indexBuildTask Execute buildIndex failed, need to retry", zap.Error(err))
// return err
// }
// if err = it.saveIndex(ctx, blobs); err != nil {
// logutil.Logger(it.ctx).Warn("save index file failed", zap.Error(err))
// it.SetState(commonpb.IndexState_Unissued)
// return err
// }
// it.SetState(commonpb.IndexState_Finished)
// saveIndexFileDur := it.tr.Record("index file save done")
// metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
// it.tr.Elapse("index building all done")
// logutil.Logger(it.ctx).Info("IndexNode CreateIndex successfully ", zap.Int64("collect", it.collectionID),
// zap.Int64("partition", it.partitionID), zap.Int64("segment", it.segmentID))
// return nil
// }
// }

View File

@ -1,334 +1,104 @@
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package indexnode
import (
"container/list"
"context"
"errors"
"runtime/debug"
"sync"
"github.com/milvus-io/milvus/internal/storage"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/opentracing/opentracing-go"
oplog "github.com/opentracing/opentracing-go/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/util/logutil"
"go.uber.org/zap"
)
// TaskQueue is a queue used to store tasks.
type TaskQueue interface {
utChan() <-chan int
utEmpty() bool
utFull() bool
addUnissuedTask(t task) error
//FrontUnissuedTask() task
PopUnissuedTask() task
AddActiveTask(t task)
PopActiveTask(tID UniqueID) task
Enqueue(t task) error
//tryToRemoveUselessIndexBuildTask(indexID UniqueID) []UniqueID
GetTaskNum() int
// taskScheduler feeds index build tasks from a buffered channel into a single
// background build loop.
type taskScheduler struct {
	taskchan chan task          // buffered queue of accepted tasks
	ctx      context.Context    // governs the build loop's lifetime
	cancel   context.CancelFunc // stops the loop; called by Close
	wg       sync.WaitGroup     // tracks the build-loop goroutine
}
// BaseTaskQueue is a basic instance of TaskQueue: a FIFO of unissued tasks
// plus a map of tasks currently executing, each guarded by its own mutex.
type BaseTaskQueue struct {
	unissuedTasks *list.List        // FIFO of tasks waiting to be scheduled
	activeTasks   map[UniqueID]task // tasks currently executing, keyed by task ID
	utLock        sync.Mutex        // guards unissuedTasks
	atLock        sync.Mutex        // guards activeTasks
	// maxTaskNum should keep still
	maxTaskNum int64
	utBufChan chan int // to block scheduler
	sched *TaskScheduler
}
// utChan exposes the buffered notification channel: one token is pushed per
// enqueued task, letting the scheduler block until work is available.
func (queue *BaseTaskQueue) utChan() <-chan int {
	return queue.utBufChan
}
// utEmpty reports whether no unissued tasks are queued.
// NOTE(review): reads unissuedTasks without holding utLock — confirm callers
// either hold the lock or tolerate a racy snapshot.
func (queue *BaseTaskQueue) utEmpty() bool {
	return queue.unissuedTasks.Len() == 0
}
// utFull reports whether the unissued queue has reached its maxTaskNum cap.
// NOTE(review): assumes the caller holds utLock (as addUnissuedTask does) —
// confirm no lock-free callers exist.
func (queue *BaseTaskQueue) utFull() bool {
	return int64(queue.unissuedTasks.Len()) >= queue.maxTaskNum
}
func (queue *BaseTaskQueue) addUnissuedTask(t task) error {
queue.utLock.Lock()
defer queue.utLock.Unlock()
if queue.utFull() {
return errors.New("IndexNode task queue is full")
func NewTaskScheduler(ctx context.Context, cap int) *taskScheduler {
newctx, cancel := context.WithCancel(ctx)
return &taskScheduler{
taskchan: make(chan task, cap),
ctx: newctx,
cancel: cancel,
}
queue.unissuedTasks.PushBack(t)
queue.utBufChan <- 1
return nil
}
//func (queue *BaseTaskQueue) FrontUnissuedTask() task {
// queue.utLock.Lock()
// defer queue.utLock.Unlock()
//
// if queue.unissuedTasks.Len() <= 0 {
// log.Debug("IndexNode FrontUnissuedTask sorry, but the unissued task list is empty!")
// return nil
// }
//
// return queue.unissuedTasks.Front().Value.(task)
//}
// PopUnissuedTask removes and returns the oldest queued task, or nil when the
// unissued queue is empty.
func (queue *BaseTaskQueue) PopUnissuedTask() task {
	queue.utLock.Lock()
	defer queue.utLock.Unlock()
	front := queue.unissuedTasks.Front()
	if front == nil {
		return nil
	}
	queue.unissuedTasks.Remove(front)
	return front.Value.(task)
}
// AddActiveTask adds a task to activeTasks.
// A duplicate task ID is logged and then overwritten with the new task.
func (queue *BaseTaskQueue) AddActiveTask(t task) {
	queue.atLock.Lock()
	defer queue.atLock.Unlock()
	tID := t.ID()
	_, ok := queue.activeTasks[tID]
	if ok {
		log.Debug("IndexNode task already in active task list", zap.Any("TaskID", tID))
	}
	queue.activeTasks[tID] = t
}
// PopActiveTask removes the task with the given ID from activeTasks and
// returns it; a miss is logged and yields nil.
func (queue *BaseTaskQueue) PopActiveTask(tID UniqueID) task {
	queue.atLock.Lock()
	defer queue.atLock.Unlock()
	if found, ok := queue.activeTasks[tID]; ok {
		delete(queue.activeTasks, tID)
		return found
	}
	log.Debug("IndexNode task was not found in the active task list", zap.Any("TaskID", tID))
	return nil
}
//func (queue *BaseTaskQueue) tryToRemoveUselessIndexBuildTask(indexID UniqueID) []UniqueID {
// queue.utLock.Lock()
// defer queue.utLock.Unlock()
//
// var next *list.Element
// var indexBuildIDs []UniqueID
// for e := queue.unissuedTasks.Front(); e != nil; e = next {
// next = e.Next()
// indexBuildTask, ok := e.Value.(*IndexBuildTask)
// if !ok {
// continue
// }
// if indexBuildTask.req.IndexID == indexID {
// indexBuildIDs = append(indexBuildIDs, indexBuildTask.req.IndexBuildID)
// queue.unissuedTasks.Remove(e)
// indexBuildTask.Notify(nil)
// }
// }
// return indexBuildIDs
//}
// Enqueue adds a task to TaskQueue.
func (queue *BaseTaskQueue) Enqueue(t task) error {
err := t.OnEnqueue()
if err != nil {
func (s *taskScheduler) Enqueue(t task) error {
ctx := t.Ctx()
if err := t.OnEnqueue(ctx); err != nil {
return err
}
return queue.addUnissuedTask(t)
}
// GetTaskNum returns the combined count of unissued and active tasks. The two
// counters are read under separate locks, so the sum is a best-effort snapshot.
func (queue *BaseTaskQueue) GetTaskNum() int {
	queue.utLock.Lock()
	unissued := queue.unissuedTasks.Len()
	queue.utLock.Unlock()

	queue.atLock.Lock()
	active := len(queue.activeTasks)
	queue.atLock.Unlock()

	return unissued + active
}
// IndexBuildTaskQueue is a task queue used to store building index tasks.
type IndexBuildTaskQueue struct {
BaseTaskQueue
}
// NewIndexBuildTaskQueue creates a new IndexBuildTaskQueue.
func NewIndexBuildTaskQueue(sched *TaskScheduler) *IndexBuildTaskQueue {
return &IndexBuildTaskQueue{
BaseTaskQueue: BaseTaskQueue{
unissuedTasks: list.New(),
activeTasks: make(map[UniqueID]task),
maxTaskNum: 1024,
utBufChan: make(chan int, 1024),
sched: sched,
},
select {
case <-ctx.Done():
return cancelErr
case s.taskchan <- t:
return nil
}
}
// TaskScheduler is a scheduler of indexing tasks.
type TaskScheduler struct {
IndexBuildQueue TaskQueue
buildParallel int
cm storage.ChunkManager
wg sync.WaitGroup
ctx context.Context
cancel context.CancelFunc
// GetPendingJob returns the number of tasks currently buffered in the
// scheduler's channel, i.e. accepted but not yet picked up by the build loop.
func (s *taskScheduler) GetPendingJob() int {
	return len(s.taskchan)
}
// NewTaskScheduler creates a new task scheduler of indexing tasks, wiring the
// build queue back to the scheduler and deriving a cancellable context.
// The returned error is always nil; the signature keeps it for compatibility.
func NewTaskScheduler(ctx context.Context,
	cm storage.ChunkManager) (*TaskScheduler, error) {
	ctx1, cancel := context.WithCancel(ctx)
	s := &TaskScheduler{
		cm:            cm,
		ctx:           ctx1,
		cancel:        cancel,
		buildParallel: Params.IndexNodeCfg.BuildParallel,
	}
	s.IndexBuildQueue = NewIndexBuildTaskQueue(s)
	return s, nil
}
//func (sched *TaskScheduler) setParallelism(parallel int) {
// if parallel <= 0 {
// log.Debug("IndexNode can not set parallelism to less than zero!")
// return
// }
// sched.buildParallel = parallel
//}
// scheduleIndexBuildTask pops up to buildParallel unissued tasks from the
// build queue and returns them; it stops early when the queue runs dry.
func (sched *TaskScheduler) scheduleIndexBuildTask() []task {
	// Pre-size to the known upper bound to avoid repeated slice growth.
	ret := make([]task, 0, sched.buildParallel)
	for i := 0; i < sched.buildParallel; i++ {
		t := sched.IndexBuildQueue.PopUnissuedTask()
		if t == nil {
			break
		}
		ret = append(ret, t)
	}
	return ret
}
// processTask drives one task through PreExecute → Execute → PostExecute under
// a tracing span, mirroring each step's error into the task via SetError and
// keeping the task in the active queue while Execute runs.
func (sched *TaskScheduler) processTask(t task, q TaskQueue) {
	span, ctx := trace.StartSpanFromContext(t.Ctx(),
		opentracing.Tags{
			"Type": t.Name(),
			"ID":   t.ID(),
		})
	defer span.Finish()
	span.LogFields(oplog.Int64("scheduler process PreExecute", t.ID()))
	err := t.PreExecute(ctx)
	t.SetError(err)
	// An abandoned task is dropped before PostExecute is even scheduled.
	if t.GetState() == TaskStateAbandon {
		log.Info("IndexNode scheduler abandon task",
			zap.String("TaskState", t.GetState().String()),
			zap.Int64("taskID", t.ID()))
		return
	}
	// From this point on, PostExecute always runs — even if PreExecute failed.
	defer func() {
		span.LogFields(oplog.Int64("scheduler process PostExecute", t.ID()))
		err := t.PostExecute(ctx)
		t.SetError(err)
	}()
	if err != nil {
		trace.LogError(span, err)
		return
	}
	span.LogFields(oplog.Int64("scheduler process AddActiveTask", t.ID()))
	q.AddActiveTask(t)
	// log.Printf("task add to active list ...")
	defer func() {
		span.LogFields(oplog.Int64("scheduler process PopActiveTask", t.ID()))
		q.PopActiveTask(t.ID())
		// log.Printf("pop from active list ...")
	}()
	span.LogFields(oplog.Int64("scheduler process Execute", t.ID()))
	err = t.Execute(ctx)
	t.SetError(err)
}
func (sched *TaskScheduler) indexBuildLoop() {
func (s *taskScheduler) indexBuildLoop() {
log.Debug("IndexNode TaskScheduler start build loop ...")
defer sched.wg.Done()
defer log.Warn("index build loop stopped")
defer s.wg.Done()
for {
select {
case <-sched.ctx.Done():
case <-s.ctx.Done():
return
case <-sched.IndexBuildQueue.utChan():
if !sched.IndexBuildQueue.utEmpty() {
tasks := sched.scheduleIndexBuildTask()
var wg sync.WaitGroup
for _, t := range tasks {
wg.Add(1)
go func(group *sync.WaitGroup, t task) {
defer group.Done()
sched.processTask(t, sched.IndexBuildQueue)
}(&wg, t)
}
wg.Wait()
case t, ok := <-s.taskchan:
if !ok {
log.Error("task chan closed unexpectedly")
return
}
s.doBuild(t)
}
}
}
// Start stats the task scheduler of indexing tasks.
func (sched *TaskScheduler) Start() error {
sched.wg.Add(1)
go sched.indexBuildLoop()
return nil
// doBuild runs one task through the four pipeline stages (Prepare, LoadData,
// BuildIndex, SaveIndexFiles). A stage is skipped with cancelErr if the task's
// context is already done; the first failure maps to a terminal state:
// cancellation → Abandoned, missing key → Failed, anything else → Unissued
// (retry). Success of all stages → Finished. The task is always Reset and
// freed memory returned to the OS on exit.
func (s *taskScheduler) doBuild(t task) {
	runStage := func(fn func(ctx context.Context) error) error {
		select {
		case <-t.Ctx().Done():
			return cancelErr
		default:
			return fn(t.Ctx())
		}
	}
	defer func() {
		t.Reset()
		debug.FreeOSMemory()
	}()
	stages := []func(context.Context) error{t.Prepare, t.LoadData, t.BuildIndex, t.SaveIndexFiles}
	for _, stage := range stages {
		err := runStage(stage)
		if err == nil {
			continue
		}
		switch {
		case err == cancelErr:
			logutil.Logger(t.Ctx()).Warn("index build task cancelled", zap.String("task", t.Name()))
			t.SetState(commonpb.IndexState_Abandoned)
		case errors.Is(err, ErrNoSuchKey):
			t.SetState(commonpb.IndexState_Failed)
		default:
			t.SetState(commonpb.IndexState_Unissued)
		}
		return
	}
	t.SetState(commonpb.IndexState_Finished)
}
// Close closes the task scheduler of indexing tasks.
func (sched *TaskScheduler) Close() {
sched.cancel()
sched.wg.Wait()
// Start launches the single background build loop. It must be balanced by a
// later call to Close, which cancels the loop and waits for it to exit.
func (s *taskScheduler) Start() {
	s.wg.Add(1)
	go s.indexBuildLoop()
}
func (sched *TaskScheduler) GetTaskSlots() int {
return sched.buildParallel - sched.IndexBuildQueue.GetTaskNum()
// Close cancels the scheduler's context and blocks until the background build
// loop has returned.
func (s *taskScheduler) Close() {
	s.cancel()
	s.wg.Wait()
}

View File

@ -0,0 +1,225 @@
package indexnode
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/stretchr/testify/assert"
)
// fakeTaskState enumerates the lifecycle stages a fakeTask moves through; the
// scheduler tests cancel a task's context once it reaches a chosen stage.
type fakeTaskState int

// Typed constants (previously untyped ints) so stage values cannot be mixed
// with unrelated integers; values are unchanged (Inited == 0 ... SavedIndexes == 5).
const (
	fakeTaskInited fakeTaskState = iota
	fakeTaskEnqueued
	fakeTaskPrepared
	fakeTaskLoadedData
	fakeTaskBuiltIndex
	fakeTaskSavedIndexes
)
// stagectx is a context.Context whose cancellation fires once the owning fake
// task reports (via setState) that it reached the configured cancel stage.
type stagectx struct {
	mu           sync.Mutex    // guards curstate
	curstate     fakeTaskState // latest stage reported via setState
	state2cancel fakeTaskState // stage at which Done starts reporting cancelled
	ch chan struct{} // closed exactly once to signal cancellation
	closeMu sync.Mutex // guards closed and the close(ch) call
	closed  bool
	mimeTimeout bool // when set, Done only fires after a fixed delay
}

// Compile-time check that stagectx satisfies context.Context.
var _ context.Context = &stagectx{}
// Deadline implements context.Context; ok=false signals "no deadline", so the
// returned time value is arbitrary.
func (s *stagectx) Deadline() (time.Time, bool) {
	return time.Now(), false
}
// closeChannel closes ch exactly once (guarded by closeMu) and returns it, so
// repeated cancellation checks never double-close the channel.
func (s *stagectx) closeChannel() <-chan struct{} {
	s.closeMu.Lock()
	defer s.closeMu.Unlock()
	if !s.closed {
		close(s.ch)
		s.closed = true
	}
	return s.ch
}
// Done implements context.Context. With mimeTimeout set, it reports done only
// after a fixed 3s delay (used to force an Enqueue timeout); otherwise the
// channel is closed once the task has reached state2cancel.
// NOTE(review): the mimeTimeout path blocks every Done call for 3s — fine for
// a test helper only.
func (s *stagectx) Done() <-chan struct{} {
	if s.mimeTimeout {
		<-time.After(time.Second * 3)
		return s.closeChannel()
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.curstate == s.state2cancel {
		return s.closeChannel()
	}
	return s.ch
}
// Err implements context.Context: it returns a cancellation error once ch has
// been closed, and nil before that.
func (s *stagectx) Err() error {
	select {
	case <-s.ch:
		return fmt.Errorf("cancelled")
	default:
		return nil
	}
}
// Value implements context.Context; this fake carries no values and always
// returns nil.
func (s *stagectx) Value(k interface{}) interface{} {
	return nil
}
// setState records the stage the owning task has reached; Done reads it to
// decide when to auto-cancel the context.
func (s *stagectx) setState(state fakeTaskState) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.curstate = state
}
// _taskwg counts in-flight fake tasks: incremented in OnEnqueue and
// decremented in Reset, letting tests wait for the scheduler to drain.
var _taskwg sync.WaitGroup

// fakeTask is a scripted task implementation used by the scheduler tests.
type fakeTask struct {
	id            int
	ctx           context.Context         // always a *stagectx in these tests
	state         fakeTaskState           // last pipeline stage reached
	reterr        map[fakeTaskState]error // per-stage injected errors
	retstate      commonpb.IndexState     // state recorded via SetState
	expectedState commonpb.IndexState     // state the test expects at the end
}
// Compile-time check that fakeTask satisfies the task interface.
var _ task = &fakeTask{}

// Name returns a unique, human-readable identifier for logging.
func (t *fakeTask) Name() string {
	return fmt.Sprintf("fake-task-%d", t.id)
}
// Ctx returns the task's staged-cancellation context.
func (t *fakeTask) Ctx() context.Context {
	return t.ctx
}
// OnEnqueue registers the task with the global wait group, advances the state
// machine to "enqueued", and returns any error injected for that stage.
func (t *fakeTask) OnEnqueue(ctx context.Context) error {
	_taskwg.Add(1)
	t.state = fakeTaskEnqueued
	t.ctx.(*stagectx).setState(t.state)
	return t.reterr[t.state]
}
// advance records that the task reached the given stage, mirrors it into the
// staged context (so the context can trigger its scripted cancellation), and
// returns the error injected for that stage, if any. It deduplicates the four
// previously identical stage-method bodies.
func (t *fakeTask) advance(state fakeTaskState) error {
	t.state = state
	t.ctx.(*stagectx).setState(state)
	return t.reterr[state]
}

// Prepare marks the task as prepared.
func (t *fakeTask) Prepare(ctx context.Context) error {
	return t.advance(fakeTaskPrepared)
}

// LoadData marks the task as having loaded its data.
func (t *fakeTask) LoadData(ctx context.Context) error {
	return t.advance(fakeTaskLoadedData)
}

// BuildIndex marks the task as having built its index.
func (t *fakeTask) BuildIndex(ctx context.Context) error {
	return t.advance(fakeTaskBuiltIndex)
}

// SaveIndexFiles marks the task as having saved its index files.
func (t *fakeTask) SaveIndexFiles(ctx context.Context) error {
	return t.advance(fakeTaskSavedIndexes)
}
// Reset releases the task's slot in the global wait group; the scheduler calls
// it once a task's pipeline has finished (in any state).
func (t *fakeTask) Reset() {
	_taskwg.Done()
}
// SetState records the terminal state the scheduler assigned to this task.
func (t *fakeTask) SetState(state commonpb.IndexState) {
	t.retstate = state
}
// GetState returns the state last recorded via SetState.
func (t *fakeTask) GetState() commonpb.IndexState {
	return t.retstate
}
// idLock guards the monotonically increasing fake-task id counter below, so
// concurrently created tasks still get unique ids.
var (
	idLock sync.Mutex
	id     = 0
)
// newTask builds a fakeTask wired to a stagectx that cancels itself once the
// task reaches cancelStage; reterror injects per-stage failures, and
// expectedState is the terminal state the test asserts afterwards.
func newTask(cancelStage fakeTaskState, reterror map[fakeTaskState]error, expectedState commonpb.IndexState) task {
	idLock.Lock()
	taskID := id
	id++
	idLock.Unlock()

	stage := &stagectx{
		curstate:     fakeTaskInited,
		state2cancel: cancelStage,
		ch:           make(chan struct{}),
	}
	return &fakeTask{
		id:            taskID,
		ctx:           stage,
		state:         fakeTaskInited,
		reterr:        reterror,
		retstate:      commonpb.IndexState_IndexStateNone,
		expectedState: expectedState,
	}
}
// TestIndexTaskScheduler exercises the task scheduler in two phases:
// (1) a mixed batch of tasks that are cancelled mid-pipeline or fail with
// injected errors, checking the terminal state each one reaches; and
// (2) a scheduler whose channel is filled to capacity, verifying that one
// more Enqueue fails once its context gives up, then that all buffered
// tasks finish after the scheduler starts.
func TestIndexTaskScheduler(t *testing.T) {
	Params.Init()
	scheduler := NewTaskScheduler(context.TODO(), 1024)
	scheduler.Start()
	// Three tasks cancelled mid-pipeline (Abandoned), one clean run (Finished),
	// one missing-key failure (Failed), one generic save error (Unissued/retry).
	tasks := make([]task, 0)
	tasks = append(tasks,
		newTask(fakeTaskLoadedData, nil, commonpb.IndexState_Abandoned),
		newTask(fakeTaskPrepared, nil, commonpb.IndexState_Abandoned),
		newTask(fakeTaskBuiltIndex, nil, commonpb.IndexState_Abandoned),
		newTask(fakeTaskSavedIndexes, nil, commonpb.IndexState_Finished),
		newTask(fakeTaskSavedIndexes, map[fakeTaskState]error{fakeTaskLoadedData: ErrNoSuchKey}, commonpb.IndexState_Failed),
		newTask(fakeTaskSavedIndexes, map[fakeTaskState]error{fakeTaskSavedIndexes: fmt.Errorf("auth failed")}, commonpb.IndexState_Unissued))
	for _, task := range tasks {
		assert.Nil(t, scheduler.Enqueue(task))
	}
	// Wait until every enqueued task's Reset has run (pipeline drained).
	_taskwg.Wait()
	scheduler.Close()
	// NOTE(review): Close already waits on wg; this extra Wait is redundant but
	// harmless — confirm before removing.
	scheduler.wg.Wait()
	// The first four tasks stopped exactly at their configured cancel stage.
	for _, task := range tasks[:len(tasks)-2] {
		assert.Equal(t, task.GetState(), task.(*fakeTask).expectedState)
		assert.Equal(t, task.Ctx().(*stagectx).curstate, task.Ctx().(*stagectx).state2cancel)
	}
	// The error-injected tasks halted at the stage that returned the error.
	assert.Equal(t, tasks[len(tasks)-2].GetState(), tasks[len(tasks)-2].(*fakeTask).expectedState)
	assert.Equal(t, tasks[len(tasks)-2].Ctx().(*stagectx).curstate, fakeTaskState(fakeTaskLoadedData))
	assert.Equal(t, tasks[len(tasks)-1].GetState(), tasks[len(tasks)-1].(*fakeTask).expectedState)
	assert.Equal(t, tasks[len(tasks)-1].Ctx().(*stagectx).curstate, fakeTaskState(fakeTaskSavedIndexes))
	// Phase 2: fill the channel to capacity before starting the scheduler.
	scheduler = NewTaskScheduler(context.TODO(), 1024)
	tasks = make([]task, 0, 1024)
	for i := 0; i < 1024; i++ {
		tasks = append(tasks, newTask(fakeTaskSavedIndexes, nil, commonpb.IndexState_Finished))
		assert.Nil(t, scheduler.Enqueue(tasks[len(tasks)-1]))
	}
	// With the channel full, this task's context (mimeTimeout) reports done
	// after a fixed delay, so Enqueue must return an error.
	failTask := newTask(fakeTaskSavedIndexes, nil, commonpb.IndexState_Finished)
	failTask.Ctx().(*stagectx).mimeTimeout = true
	err := scheduler.Enqueue(failTask)
	assert.Error(t, err)
	// Balance the WaitGroup increment that OnEnqueue performed.
	failTask.Reset()
	scheduler.Start()
	_taskwg.Wait()
	scheduler.Close()
	scheduler.wg.Wait()
	for _, task := range tasks {
		assert.Equal(t, task.GetState(), commonpb.IndexState_Finished)
	}
}

View File

@ -16,181 +16,181 @@
package indexnode
import (
"context"
"errors"
"math/rand"
"path"
"strconv"
"testing"
// import (
// "context"
// "errors"
// "math/rand"
// "path"
// "strconv"
// "testing"
"github.com/milvus-io/milvus/internal/kv"
// "github.com/milvus-io/milvus/internal/kv"
"github.com/golang/protobuf/proto"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/stretchr/testify/assert"
)
// "github.com/golang/protobuf/proto"
// etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
// "github.com/milvus-io/milvus/internal/proto/commonpb"
// "github.com/milvus-io/milvus/internal/proto/indexpb"
// "github.com/milvus-io/milvus/internal/storage"
// "github.com/milvus-io/milvus/internal/util/etcd"
// "github.com/milvus-io/milvus/internal/util/timerecord"
// "github.com/stretchr/testify/assert"
// )
func TestIndexBuildTask_saveIndexMeta(t *testing.T) {
Params.Init()
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.NoError(t, err)
assert.NotNil(t, etcdCli)
etcdKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
assert.NotNil(t, etcdKV)
indexBuildID := rand.Int63()
indexMeta := &indexpb.IndexMeta{
IndexBuildID: indexBuildID,
State: commonpb.IndexState_InProgress,
NodeID: 1,
IndexVersion: 1,
}
metaPath := path.Join("indexes", strconv.FormatInt(indexMeta.IndexBuildID, 10))
metaValue, err := proto.Marshal(indexMeta)
assert.NoError(t, err)
err = etcdKV.Save(metaPath, string(metaValue))
assert.NoError(t, err)
indexBuildTask := &IndexBuildTask{
BaseTask: BaseTask{
internalErr: errors.New("internal err"),
},
etcdKV: etcdKV,
req: &indexpb.CreateIndexRequest{
IndexBuildID: indexBuildID,
Version: 1,
MetaPath: metaPath,
},
tr: &timerecord.TimeRecorder{},
}
err = indexBuildTask.saveIndexMeta(context.Background())
assert.NoError(t, err)
// func TestIndexBuildTask_saveIndexMeta(t *testing.T) {
// Params.Init()
// etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
// assert.NoError(t, err)
// assert.NotNil(t, etcdCli)
// etcdKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
// assert.NotNil(t, etcdKV)
// indexBuildID := rand.Int63()
// indexMeta := &indexpb.IndexMeta{
// IndexBuildID: indexBuildID,
// State: commonpb.IndexState_InProgress,
// NodeID: 1,
// IndexVersion: 1,
// }
// metaPath := path.Join("indexes", strconv.FormatInt(indexMeta.IndexBuildID, 10))
// metaValue, err := proto.Marshal(indexMeta)
// assert.NoError(t, err)
// err = etcdKV.Save(metaPath, string(metaValue))
// assert.NoError(t, err)
// indexBuildTask := &IndexBuildTask{
// BaseTask: BaseTask{
// internalErr: errors.New("internal err"),
// },
// etcdKV: etcdKV,
// req: &indexpb.CreateIndexRequest{
// IndexBuildID: indexBuildID,
// Version: 1,
// MetaPath: metaPath,
// },
// tr: &timerecord.TimeRecorder{},
// }
// err = indexBuildTask.saveIndexMeta(context.Background())
// assert.NoError(t, err)
indexMeta2, _, err := indexBuildTask.loadIndexMeta(context.Background())
assert.NoError(t, err)
assert.NotNil(t, indexMeta2)
assert.Equal(t, commonpb.IndexState_Unissued, indexMeta2.State)
// indexMeta2, _, err := indexBuildTask.loadIndexMeta(context.Background())
// assert.NoError(t, err)
// assert.NotNil(t, indexMeta2)
// assert.Equal(t, commonpb.IndexState_Unissued, indexMeta2.State)
err = etcdKV.Remove(metaPath)
assert.NoError(t, err)
}
// err = etcdKV.Remove(metaPath)
// assert.NoError(t, err)
// }
// mockChunkManager is a test double for storage.ChunkManager. It embeds the
// real interface so only the methods a test cares about need stubbing, and
// delegates Read to a caller-supplied function.
type mockChunkManager struct {
	storage.ChunkManager

	// read is invoked by Read; tests set it to control returned data/errors.
	read func(key string) ([]byte, error)
}
// Read implements storage.ChunkManager by delegating to the injected read
// function. It panics if mcm.read was not set by the test.
func (mcm *mockChunkManager) Read(key string) ([]byte, error) {
	return mcm.read(key)
}
// TestIndexBuildTask_Execute verifies how IndexBuildTask.Execute maps chunk
// manager read failures onto task states: a generic read error leaves the
// task retryable, while ErrNoSuchKey (data permanently missing) fails it.
func TestIndexBuildTask_Execute(t *testing.T) {
	t.Run("task retry", func(t *testing.T) {
		// A transient read error should surface to the caller and mark the
		// task for retry rather than terminal failure.
		indexTask := &IndexBuildTask{
			cm: &mockChunkManager{
				read: func(key string) ([]byte, error) {
					return nil, errors.New("error occurred")
				},
			},
			req: &indexpb.CreateIndexRequest{
				IndexBuildID: 1,
				DataPaths:    []string{"path1", "path2"},
			},
		}
		err := indexTask.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, TaskStateRetry, indexTask.state)
	})

	t.Run("task failed", func(t *testing.T) {
		// ErrNoSuchKey means the source data is gone; retrying cannot help,
		// so the task must transition to the failed state.
		indexTask := &IndexBuildTask{
			cm: &mockChunkManager{
				read: func(key string) ([]byte, error) {
					return nil, ErrNoSuchKey
				},
			},
			req: &indexpb.CreateIndexRequest{
				IndexBuildID: 1,
				DataPaths:    []string{"path1", "path2"},
			},
		}
		err := indexTask.Execute(context.Background())
		assert.ErrorIs(t, err, ErrNoSuchKey)
		assert.Equal(t, TaskStateFailed, indexTask.state)
	})
}
// mockETCDKV is a test double for kv.MetaKv. It embeds the real interface so
// only LoadWithPrefix2 needs stubbing, via a caller-supplied function.
type mockETCDKV struct {
	kv.MetaKv

	// loadWithPrefix2 is invoked by LoadWithPrefix2; tests set it to control
	// the returned keys, values, revisions, and error.
	loadWithPrefix2 func(key string) ([]string, []string, []int64, error)
}
// LoadWithPrefix2 implements kv.MetaKv by delegating to the injected
// loadWithPrefix2 function. It panics if the function was not set by the test.
func (mk *mockETCDKV) LoadWithPrefix2(key string) ([]string, []string, []int64, error) {
	return mk.loadWithPrefix2(key)
}
// TestIndexBuildTask_loadIndexMeta verifies that loading an index meta that no
// longer exists in etcd (empty LoadWithPrefix2 result) abandons the task, and
// that updateTaskState keeps it abandoned when the meta is absent.
func TestIndexBuildTask_loadIndexMeta(t *testing.T) {
	t.Run("load empty meta", func(t *testing.T) {
		indexTask := &IndexBuildTask{
			etcdKV: &mockETCDKV{
				// Simulate a meta key that has been deleted: no entries, no error.
				loadWithPrefix2: func(key string) ([]string, []string, []int64, error) {
					return []string{}, []string{}, []int64{}, nil
				},
			},
			req: &indexpb.CreateIndexRequest{
				IndexBuildID: 1,
				DataPaths:    []string{"path1", "path2"},
			},
		}
		indexMeta, revision, err := indexTask.loadIndexMeta(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, int64(0), revision)
		assert.Equal(t, TaskStateAbandon, indexTask.GetState())

		// Feeding the (nil) meta back through updateTaskState must not
		// resurrect the task.
		indexTask.updateTaskState(indexMeta, nil)
		assert.Equal(t, TaskStateAbandon, indexTask.GetState())
	})
}
// TestIndexBuildTask_saveIndex verifies that saveIndex propagates an error
// when the etcd meta lookup backing the save fails.
func TestIndexBuildTask_saveIndex(t *testing.T) {
	t.Run("save index failed", func(t *testing.T) {
		indexTask := &IndexBuildTask{
			etcdKV: &mockETCDKV{
				// Any etcd failure while reading the meta must abort the save.
				loadWithPrefix2: func(key string) ([]string, []string, []int64, error) {
					return []string{}, []string{}, []int64{}, errors.New("error")
				},
			},
			partitionID: 1,
			segmentID:   1,
			req: &indexpb.CreateIndexRequest{
				IndexBuildID: 1,
				DataPaths:    []string{"path1", "path2"},
				Version:      1,
			},
		}

		blobs := []*storage.Blob{
			{
				Key:   "key1",
				Value: []byte("value1"),
			},
			{
				Key:   "key2",
				Value: []byte("value2"),
			},
		}

		err := indexTask.saveIndex(context.Background(), blobs)
		assert.Error(t, err)
	})
}

View File

@ -0,0 +1,83 @@
package indexnode
import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/indexnodepb"
)
// loadOrStoreTask registers info under (clusterID, buildID) unless a task with
// that key is already tracked. It returns the existing *taskInfo when the key
// is occupied, or nil after storing info as the new entry. The whole
// check-then-store is performed under stateLock, so it is atomic with respect
// to the other task-map accessors.
func (i *IndexNode) loadOrStoreTask(clusterID, buildID UniqueID, info *taskInfo) *taskInfo {
	i.stateLock.Lock()
	defer i.stateLock.Unlock()
	key := taskKey{ClusterID: clusterID, BuildID: buildID}
	if existing, ok := i.tasks[key]; ok {
		return existing
	}
	i.tasks[key] = info
	return nil
}
// loadTaskState returns the current state of the task identified by
// (clusterID, buildID) and whether such a task is tracked. When the task is
// unknown it reports (IndexStateNone, false).
//
// Bug fix: the previous version read task.state unconditionally, dereferencing
// a nil *taskInfo (and panicking) whenever the key was absent from i.tasks.
func (i *IndexNode) loadTaskState(clusterID, buildID UniqueID) (commonpb.IndexState, bool) {
	key := taskKey{ClusterID: clusterID, BuildID: buildID}
	i.stateLock.Lock()
	defer i.stateLock.Unlock()
	task, ok := i.tasks[key]
	if !ok {
		return commonpb.IndexState_IndexStateNone, false
	}
	return task.state, true
}
// storeTaskState sets the state of the task identified by
// (clusterID, buildID). If no such task is tracked, the call is a no-op.
func (i *IndexNode) storeTaskState(clusterID, buildID UniqueID, state commonpb.IndexState) {
	i.stateLock.Lock()
	defer i.stateLock.Unlock()
	if task, ok := i.tasks[taskKey{ClusterID: clusterID, BuildID: buildID}]; ok {
		task.state = state
	}
}
// foreachTaskInfo invokes fn once for every tracked task. The callback runs
// while stateLock is held, so fn must not call other IndexNode methods that
// acquire stateLock, or it will deadlock. Iteration order over the map is
// unspecified.
func (i *IndexNode) foreachTaskInfo(fn func(clusterID, buildID UniqueID, info *taskInfo)) {
	i.stateLock.Lock()
	defer i.stateLock.Unlock()
	for k, v := range i.tasks {
		fn(k.ClusterID, k.BuildID, v)
	}
}
// storeIndexFilesAndStatistic records the produced index file paths and the
// job statistic for the task identified by (clusterID, buildID). If the task
// is not tracked, the call is a no-op.
//
// Fixes: the original used `if !ok { return } else { ... }` (non-idiomatic —
// no else after return) and assigned `files[:]`, which aliases the caller's
// backing array instead of copying it; a later mutation by the caller would
// silently change the stored file list. Both inputs are now copied.
func (i *IndexNode) storeIndexFilesAndStatistic(clusterID, buildID UniqueID, files []string, statistic *indexnodepb.JobInfo) {
	key := taskKey{ClusterID: clusterID, BuildID: buildID}
	i.stateLock.Lock()
	defer i.stateLock.Unlock()
	info, ok := i.tasks[key]
	if !ok {
		return
	}
	// Defensive copies: isolate stored state from the caller's slice/message.
	info.indexfiles = append([]string(nil), files...)
	info.statistic = proto.Clone(statistic).(*indexnodepb.JobInfo)
}
// deleteTaskInfos removes the tasks identified by keys from the tracking map
// and returns the *taskInfo values that were actually present. Keys with no
// tracked task are silently skipped.
func (i *IndexNode) deleteTaskInfos(keys []taskKey) []*taskInfo {
	i.stateLock.Lock()
	defer i.stateLock.Unlock()
	removed := make([]*taskInfo, 0, len(keys))
	for _, k := range keys {
		if info, ok := i.tasks[k]; ok {
			removed = append(removed, info)
			delete(i.tasks, k)
		}
	}
	return removed
}
// deleteAllTasks atomically detaches the entire task map, replaces it with an
// empty one, and returns the removed *taskInfo values. The lock is released
// before flattening the detached map so other goroutines are not blocked
// during the copy.
func (i *IndexNode) deleteAllTasks() []*taskInfo {
	i.stateLock.Lock()
	dropped := i.tasks
	i.tasks = make(map[taskKey]*taskInfo)
	i.stateLock.Unlock()

	infos := make([]*taskInfo, 0, len(dropped))
	for _, info := range dropped {
		infos = append(infos, info)
	}
	return infos
}

View File

@ -69,6 +69,7 @@ enum IndexState {
InProgress = 2;
Finished = 3;
Failed = 4;
Abandoned = 5;
}
enum SegmentState {

View File

@ -193,6 +193,7 @@ const (
IndexState_InProgress IndexState = 2
IndexState_Finished IndexState = 3
IndexState_Failed IndexState = 4
IndexState_Abandoned IndexState = 5
)
var IndexState_name = map[int32]string{
@ -201,6 +202,7 @@ var IndexState_name = map[int32]string{
2: "InProgress",
3: "Finished",
4: "Failed",
5: "Abandoned",
}
var IndexState_value = map[string]int32{
@ -209,6 +211,7 @@ var IndexState_value = map[string]int32{
"InProgress": 2,
"Finished": 3,
"Failed": 4,
"Abandoned": 5,
}
func (x IndexState) String() string {
@ -1317,150 +1320,151 @@ func init() {
func init() { proto.RegisterFile("common.proto", fileDescriptor_555bd8c177793206) }
var fileDescriptor_555bd8c177793206 = []byte{
// 2318 bytes of a gzipped FileDescriptorProto
// 2329 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x49, 0x73, 0x24, 0x47,
0x15, 0x56, 0xa9, 0x7b, 0xd4, 0xea, 0xec, 0x92, 0x94, 0x93, 0x9a, 0xd1, 0xc8, 0xe3, 0xb1, 0x2d,
0x37, 0xb6, 0x11, 0x02, 0x4b, 0x60, 0x47, 0x00, 0x41, 0x84, 0x89, 0x90, 0xba, 0x25, 0x4d, 0x87,
0xb5, 0x34, 0x25, 0xc9, 0x76, 0x10, 0x01, 0x8a, 0x54, 0xd5, 0x53, 0xab, 0x3c, 0x55, 0x95, 0x45,
0x66, 0xb6, 0x46, 0xcd, 0xc9, 0x98, 0x3f, 0x00, 0xe6, 0x0f, 0xf0, 0x03, 0xd8, 0x17, 0xc3, 0x91,
0x1d, 0x9b, 0xed, 0xc2, 0x85, 0xcd, 0xc0, 0x11, 0xee, 0xac, 0x5e, 0x89, 0x97, 0x59, 0x9b, 0x64,
0x19, 0x0e, 0xdc, 0x2a, 0xbf, 0xf7, 0xf2, 0x6d, 0xf9, 0x96, 0xcc, 0x22, 0xae, 0x2f, 0xe2, 0x58,
0x24, 0xcb, 0xa9, 0x14, 0x5a, 0xb0, 0xd9, 0x38, 0x8c, 0x4e, 0x87, 0xca, 0xae, 0x96, 0x2d, 0xe9,
0xe6, 0xc2, 0x40, 0x88, 0x41, 0x04, 0x2b, 0x06, 0x3c, 0x1a, 0x1e, 0xaf, 0x04, 0xa0, 0x7c, 0x19,
0xa6, 0x5a, 0x48, 0xcb, 0xd8, 0x3e, 0x24, 0x13, 0x7b, 0x9a, 0xeb, 0xa1, 0x62, 0x4f, 0x10, 0x02,
0x52, 0x0a, 0x79, 0xe8, 0x8b, 0x00, 0xe6, 0x9d, 0x05, 0x67, 0x71, 0xfa, 0xb1, 0xfb, 0x97, 0x2f,
0x91, 0xba, 0xbc, 0x8e, 0x6c, 0x1d, 0x11, 0x80, 0xd7, 0x84, 0xfc, 0x93, 0xcd, 0x91, 0x09, 0x09,
0x5c, 0x89, 0x64, 0x7e, 0x7c, 0xc1, 0x59, 0x6c, 0x7a, 0xd9, 0xaa, 0xfd, 0x41, 0xe2, 0x3e, 0x09,
0xa3, 0xa7, 0x78, 0x34, 0x84, 0x3e, 0x0f, 0x25, 0xa3, 0xa4, 0x76, 0x07, 0x46, 0x46, 0x7e, 0xd3,
0xc3, 0x4f, 0x76, 0x8d, 0x5c, 0x39, 0x45, 0x72, 0xb6, 0xd1, 0x2e, 0xda, 0x8f, 0x93, 0xd6, 0x93,
0x30, 0xea, 0x72, 0xcd, 0xdf, 0x61, 0x1b, 0x23, 0xf5, 0x80, 0x6b, 0x6e, 0x76, 0xb9, 0x9e, 0xf9,
0x6e, 0xdf, 0x22, 0xf5, 0xb5, 0x48, 0x1c, 0x95, 0x22, 0x1d, 0x43, 0xcc, 0x44, 0x9e, 0x12, 0xda,
0x8f, 0xb8, 0x0f, 0x27, 0x22, 0x0a, 0x40, 0x1a, 0x93, 0x50, 0xae, 0xe6, 0x83, 0x5c, 0xae, 0xe6,
0x03, 0xf6, 0x61, 0x52, 0xd7, 0xa3, 0xd4, 0x5a, 0x33, 0xfd, 0xd8, 0x43, 0x97, 0x46, 0xa0, 0x22,
0x66, 0x7f, 0x94, 0x82, 0x67, 0x76, 0x60, 0x08, 0x8c, 0x22, 0x35, 0x5f, 0x5b, 0xa8, 0x2d, 0xba,
0x5e, 0xb6, 0x6a, 0x7f, 0xe2, 0x9c, 0xde, 0x4d, 0x29, 0x86, 0x29, 0xeb, 0x11, 0x37, 0x2d, 0x31,
0x35, 0xef, 0x2c, 0xd4, 0x16, 0x5b, 0x8f, 0x3d, 0xfc, 0xbf, 0xb4, 0x19, 0xa3, 0xbd, 0x73, 0x5b,
0xdb, 0x8f, 0x92, 0xc6, 0x6a, 0x10, 0x48, 0x50, 0x8a, 0x4d, 0x93, 0xf1, 0x30, 0xcd, 0x9c, 0x19,
0x0f, 0x53, 0x8c, 0x51, 0x2a, 0xa4, 0x36, 0xbe, 0xd4, 0x3c, 0xf3, 0xdd, 0x7e, 0xc1, 0x21, 0x8d,
0x6d, 0x35, 0x58, 0xe3, 0x0a, 0xd8, 0x87, 0xc8, 0x64, 0xac, 0x06, 0x87, 0xc6, 0x5f, 0x7b, 0xe2,
0xb7, 0x2e, 0xb5, 0x60, 0x5b, 0x0d, 0x8c, 0x9f, 0x8d, 0xd8, 0x7e, 0x60, 0x80, 0x63, 0x35, 0xe8,
0x75, 0x33, 0xc9, 0x76, 0xc1, 0x6e, 0x91, 0xa6, 0x0e, 0x63, 0x50, 0x9a, 0xc7, 0xe9, 0x7c, 0x6d,
0xc1, 0x59, 0xac, 0x7b, 0x25, 0xc0, 0x6e, 0x92, 0x49, 0x25, 0x86, 0xd2, 0x87, 0x5e, 0x77, 0xbe,
0x6e, 0xb6, 0x15, 0xeb, 0xf6, 0x13, 0xa4, 0xb9, 0xad, 0x06, 0xb7, 0x81, 0x07, 0x20, 0xd9, 0xfb,
0x49, 0xfd, 0x88, 0x2b, 0x6b, 0x51, 0xeb, 0x9d, 0x2d, 0x42, 0x0f, 0x3c, 0xc3, 0xd9, 0xfe, 0x24,
0x71, 0xbb, 0xdb, 0x5b, 0xff, 0x87, 0x04, 0x34, 0x5d, 0x9d, 0x70, 0x19, 0xec, 0xf0, 0x38, 0x4f,
0xc4, 0x12, 0x68, 0xbf, 0xe2, 0x10, 0xb7, 0x2f, 0xc3, 0xd3, 0x30, 0x82, 0x01, 0xac, 0x9f, 0x69,
0xb6, 0x41, 0xa6, 0x24, 0x58, 0xeb, 0xab, 0xd1, 0x7b, 0xf0, 0x52, 0x4d, 0x5e, 0xc6, 0x69, 0x42,
0xe8, 0xca, 0xca, 0x8a, 0x1d, 0x10, 0x56, 0xc8, 0x49, 0x73, 0x05, 0x59, 0xea, 0x3d, 0xf2, 0x5f,
0x85, 0x15, 0xe6, 0x78, 0x57, 0xe5, 0x45, 0x88, 0x2d, 0x93, 0xd9, 0x42, 0x6c, 0xc2, 0x63, 0x38,
0x0c, 0x93, 0x00, 0xce, 0xcc, 0x91, 0x5c, 0x29, 0xf9, 0xd1, 0xb5, 0x1e, 0x12, 0x96, 0x7e, 0x3d,
0x49, 0x9a, 0x45, 0x55, 0xb3, 0x16, 0x69, 0xec, 0x0d, 0x7d, 0x1f, 0x94, 0xa2, 0x63, 0x6c, 0x96,
0xcc, 0x1c, 0x24, 0x70, 0x96, 0x82, 0xaf, 0x21, 0x30, 0x3c, 0xd4, 0x61, 0x57, 0xc9, 0x54, 0x47,
0x24, 0x09, 0xf8, 0x7a, 0x83, 0x87, 0x11, 0x04, 0x74, 0x9c, 0x5d, 0x23, 0xb4, 0x0f, 0x32, 0x0e,
0x95, 0x0a, 0x45, 0xd2, 0x85, 0x24, 0x84, 0x80, 0xd6, 0xd8, 0x0d, 0x32, 0xdb, 0x11, 0x51, 0x04,
0xbe, 0x0e, 0x45, 0xb2, 0x23, 0xf4, 0xfa, 0x59, 0xa8, 0xb4, 0xa2, 0x75, 0x14, 0xdb, 0x8b, 0x22,
0x18, 0xf0, 0x68, 0x55, 0x0e, 0x86, 0x31, 0x24, 0x9a, 0x5e, 0x41, 0x19, 0x19, 0xd8, 0x0d, 0x63,
0x48, 0x50, 0x12, 0x6d, 0x54, 0x50, 0x63, 0x2c, 0xc6, 0x8d, 0x4e, 0xb2, 0x7b, 0xc8, 0xf5, 0x0c,
0xad, 0x28, 0xe0, 0x31, 0xd0, 0x26, 0x9b, 0x21, 0xad, 0x8c, 0xb4, 0xbf, 0xdb, 0x7f, 0x92, 0x92,
0x8a, 0x04, 0x4f, 0xdc, 0xf5, 0xc0, 0x17, 0x32, 0xa0, 0xad, 0x8a, 0x09, 0x4f, 0x81, 0xaf, 0x85,
0xec, 0x75, 0xa9, 0x8b, 0x06, 0x67, 0xe0, 0x1e, 0x70, 0xe9, 0x9f, 0x78, 0xa0, 0x86, 0x91, 0xa6,
0x53, 0x8c, 0x12, 0x77, 0x23, 0x8c, 0x60, 0x47, 0xe8, 0x0d, 0x31, 0x4c, 0x02, 0x3a, 0xcd, 0xa6,
0x09, 0xd9, 0x06, 0xcd, 0xb3, 0x08, 0xcc, 0xa0, 0xda, 0x0e, 0xf7, 0x4f, 0x20, 0x03, 0x28, 0x9b,
0x23, 0xac, 0xc3, 0x93, 0x44, 0xe8, 0x8e, 0x04, 0xae, 0x61, 0xc3, 0xd4, 0x2b, 0xbd, 0x8a, 0xe6,
0x9c, 0xc3, 0xc3, 0x08, 0x28, 0x2b, 0xb9, 0xbb, 0x10, 0x41, 0xc1, 0x3d, 0x5b, 0x72, 0x67, 0x38,
0x72, 0x5f, 0x43, 0xe3, 0xd7, 0x86, 0x61, 0x14, 0x98, 0x90, 0xd8, 0x63, 0xb9, 0x8e, 0x36, 0x66,
0xc6, 0xef, 0x6c, 0xf5, 0xf6, 0xf6, 0xe9, 0x1c, 0xbb, 0x4e, 0xae, 0x66, 0xc8, 0x36, 0x68, 0x19,
0xfa, 0x26, 0x78, 0x37, 0xd0, 0xd4, 0xdd, 0xa1, 0xde, 0x3d, 0xde, 0x86, 0x58, 0xc8, 0x11, 0x9d,
0xc7, 0x03, 0x35, 0x92, 0xf2, 0x23, 0xa2, 0xf7, 0xa0, 0x86, 0xf5, 0x38, 0xd5, 0xa3, 0x32, 0xbc,
0xf4, 0x26, 0xbb, 0x97, 0xdc, 0x38, 0x48, 0x03, 0xae, 0xa1, 0x17, 0x63, 0x33, 0xd9, 0xe7, 0xea,
0x0e, 0xba, 0x3b, 0x94, 0x40, 0xef, 0x65, 0x37, 0xc9, 0xdc, 0xf9, 0xb3, 0x28, 0x82, 0x75, 0x0b,
0x37, 0x5a, 0x6f, 0x3b, 0x12, 0x02, 0x48, 0x74, 0xc8, 0xa3, 0x7c, 0xe3, 0x7d, 0xa5, 0xd4, 0xb7,
0x13, 0xef, 0x47, 0xa2, 0xf5, 0xfc, 0xed, 0xc4, 0x07, 0xd8, 0x3c, 0xb9, 0xb6, 0x09, 0xfa, 0xed,
0x94, 0x05, 0xa4, 0x6c, 0x85, 0xca, 0x90, 0x0e, 0x14, 0x48, 0x95, 0x53, 0x1e, 0x64, 0x8c, 0x4c,
0x6f, 0x82, 0x46, 0x30, 0xc7, 0xda, 0x18, 0x27, 0x6b, 0x9e, 0x27, 0x22, 0xc8, 0xe1, 0x77, 0x61,
0x0c, 0xba, 0x52, 0xa4, 0x55, 0xf0, 0x21, 0x74, 0x73, 0x37, 0x05, 0xc9, 0x35, 0xa0, 0x8c, 0x2a,
0xed, 0x61, 0x94, 0xb3, 0x07, 0x18, 0x81, 0x2a, 0xfc, 0x48, 0x09, 0x57, 0xb5, 0xbe, 0x1b, 0x73,
0x38, 0xe3, 0xce, 0x2a, 0x32, 0x27, 0x2d, 0xa2, 0xd7, 0x99, 0x92, 0xa2, 0xaa, 0x73, 0xe2, 0x7b,
0x30, 0x55, 0xec, 0xbe, 0x4d, 0xc9, 0x13, 0x9d, 0xe3, 0x4b, 0xec, 0x41, 0x72, 0x9f, 0x07, 0xc7,
0x12, 0xd4, 0x49, 0x5f, 0x44, 0xa1, 0x3f, 0xea, 0x25, 0xc7, 0xa2, 0x48, 0x49, 0x64, 0x79, 0x2f,
0x5a, 0x82, 0x61, 0xb1, 0xf4, 0x1c, 0x7e, 0x1f, 0xc6, 0x64, 0x47, 0xe8, 0x3d, 0x6c, 0x78, 0x5b,
0xa6, 0x85, 0xd2, 0x47, 0x51, 0xcb, 0x8e, 0xf0, 0x20, 0x8d, 0x42, 0x9f, 0xaf, 0x9e, 0xf2, 0x30,
0xe2, 0x47, 0x11, 0xd0, 0x65, 0x0c, 0xca, 0x1e, 0x0c, 0xb0, 0x64, 0x8b, 0xf3, 0x5d, 0x61, 0x8c,
0x4c, 0x75, 0xbb, 0x1e, 0x7c, 0x6a, 0x08, 0x4a, 0x7b, 0xdc, 0x07, 0xfa, 0x97, 0xc6, 0xd2, 0x33,
0x84, 0x98, 0xa4, 0xc2, 0x0b, 0x06, 0xa0, 0x8a, 0x72, 0xb5, 0x23, 0x12, 0xa0, 0x63, 0xcc, 0x25,
0x93, 0x07, 0x49, 0xa8, 0xd4, 0x10, 0x02, 0xea, 0x60, 0x41, 0xf5, 0x92, 0xbe, 0x14, 0x03, 0x9c,
0x65, 0x74, 0x1c, 0xa9, 0x1b, 0x61, 0x12, 0xaa, 0x13, 0xd3, 0x4a, 0x08, 0x99, 0xc8, 0x2a, 0xab,
0xbe, 0xf4, 0xbc, 0x43, 0xdc, 0xcc, 0x06, 0x2b, 0xfc, 0x1a, 0xa1, 0xd5, 0x75, 0x29, 0xbe, 0x48,
0x68, 0x07, 0xdb, 0xda, 0xa6, 0x14, 0x77, 0xc3, 0x64, 0x40, 0xc7, 0x51, 0xda, 0x1e, 0xf0, 0xc8,
0x48, 0x6e, 0x91, 0xc6, 0x46, 0x34, 0x34, 0x6a, 0xea, 0x46, 0x29, 0x2e, 0x90, 0xed, 0x0a, 0x92,
0x30, 0x01, 0x52, 0x08, 0xe8, 0x04, 0x9b, 0x22, 0x4d, 0x9b, 0xf6, 0x48, 0x6b, 0x2c, 0x7d, 0x94,
0xcc, 0x5c, 0xb8, 0x07, 0xb0, 0x49, 0x52, 0xcf, 0x54, 0x53, 0xe2, 0xae, 0x85, 0x09, 0x97, 0x23,
0xdb, 0x5b, 0x68, 0x80, 0x35, 0xb7, 0x11, 0x09, 0xae, 0x33, 0x00, 0x96, 0x5e, 0x74, 0xcd, 0x20,
0x36, 0x1b, 0xa7, 0x48, 0xf3, 0x20, 0x09, 0xe0, 0x38, 0x4c, 0x20, 0xa0, 0x63, 0xa6, 0xe6, 0x6d,
0xb5, 0x94, 0xc5, 0x17, 0x60, 0x04, 0xd1, 0x98, 0x0a, 0x06, 0x58, 0xb8, 0xb7, 0xb9, 0xaa, 0x40,
0xc7, 0x78, 0x6e, 0x5d, 0x73, 0xcd, 0x3b, 0xaa, 0x6e, 0x1f, 0x98, 0x73, 0x3b, 0x11, 0x77, 0x4b,
0x4c, 0xd1, 0x13, 0xd4, 0xb4, 0x09, 0x7a, 0x6f, 0xa4, 0x34, 0xc4, 0x1d, 0x91, 0x1c, 0x87, 0x03,
0x45, 0x43, 0xd4, 0xb4, 0x25, 0x78, 0x50, 0xd9, 0xfe, 0x2c, 0x66, 0x8e, 0x07, 0x11, 0x70, 0x55,
0x95, 0x7a, 0xc7, 0x74, 0x3d, 0x63, 0xea, 0x6a, 0x14, 0x72, 0x45, 0x23, 0x74, 0x05, 0xad, 0xb4,
0xcb, 0x18, 0x0f, 0x75, 0x35, 0xd2, 0x20, 0xed, 0x3a, 0x61, 0xd7, 0xc8, 0x8c, 0xe5, 0xef, 0x73,
0xa9, 0x43, 0x23, 0xe4, 0x25, 0xc7, 0xa4, 0x8f, 0x14, 0x69, 0x89, 0xbd, 0x8c, 0x43, 0xc6, 0xbd,
0xcd, 0x55, 0x09, 0xfd, 0xcc, 0x61, 0x73, 0xe4, 0x6a, 0xee, 0x5a, 0x89, 0xff, 0xdc, 0x61, 0xb3,
0x64, 0x1a, 0x5d, 0x2b, 0x30, 0x45, 0x7f, 0x61, 0x40, 0x74, 0xa2, 0x02, 0xfe, 0xd2, 0x48, 0xc8,
0xbc, 0xa8, 0xe0, 0xbf, 0x32, 0xca, 0x50, 0x42, 0x96, 0x44, 0x8a, 0xbe, 0xea, 0xa0, 0xa5, 0xb9,
0xb2, 0x0c, 0xa6, 0xaf, 0x19, 0x46, 0x94, 0x5a, 0x30, 0xbe, 0x6e, 0x18, 0x33, 0x99, 0x05, 0xfa,
0x86, 0x41, 0x6f, 0xf3, 0x24, 0x10, 0xc7, 0xc7, 0x05, 0xfa, 0xa6, 0xc3, 0xe6, 0xc9, 0x2c, 0x6e,
0x5f, 0xe3, 0x11, 0x4f, 0xfc, 0x92, 0xff, 0x2d, 0x87, 0x5d, 0x27, 0xf4, 0x82, 0x3a, 0x45, 0x9f,
0x1b, 0x67, 0x34, 0x8f, 0xaf, 0x29, 0x1e, 0xfa, 0xa5, 0x71, 0x13, 0xab, 0x8c, 0xd1, 0x62, 0x5f,
0x1e, 0x67, 0xd3, 0x36, 0xe8, 0x76, 0xfd, 0x95, 0x71, 0xd6, 0x22, 0x13, 0xbd, 0x44, 0x81, 0xd4,
0xf4, 0x73, 0x98, 0xdf, 0x13, 0xb6, 0x83, 0xd2, 0xcf, 0x63, 0x19, 0x5d, 0x31, 0xf9, 0x4d, 0x5f,
0xc0, 0xe9, 0xcc, 0x3c, 0x50, 0x90, 0x04, 0x95, 0xda, 0x51, 0xf4, 0x0b, 0x66, 0x87, 0x1d, 0x7f,
0xf4, 0x6f, 0x35, 0x13, 0x9a, 0xea, 0x2c, 0xfc, 0x7b, 0x0d, 0x4d, 0xd8, 0x04, 0x5d, 0x96, 0x33,
0xfd, 0x47, 0x8d, 0xdd, 0x24, 0xd7, 0x73, 0xcc, 0x4c, 0xa6, 0xa2, 0x90, 0xff, 0x59, 0x63, 0xb7,
0xc8, 0x0d, 0x6c, 0xd3, 0x45, 0xde, 0xe0, 0xa6, 0x50, 0xe9, 0xd0, 0x57, 0xf4, 0x5f, 0x35, 0x76,
0x2f, 0x99, 0xdb, 0x04, 0x5d, 0x9c, 0x47, 0x85, 0xf8, 0xef, 0x1a, 0x9b, 0x22, 0x93, 0x1e, 0x8e,
0x2e, 0x38, 0x05, 0xfa, 0x6a, 0x0d, 0x0f, 0x35, 0x5f, 0x66, 0xe6, 0xbc, 0x56, 0xc3, 0x50, 0x3f,
0xcd, 0xb5, 0x7f, 0xd2, 0x8d, 0x3b, 0x27, 0x3c, 0x49, 0x20, 0x52, 0xf4, 0xf5, 0x1a, 0x06, 0xd4,
0x83, 0x58, 0x9c, 0x42, 0x05, 0x7e, 0xc3, 0x38, 0x6d, 0x98, 0x3f, 0x36, 0x04, 0x39, 0x2a, 0x08,
0x6f, 0xd6, 0xf0, 0x68, 0x2c, 0xff, 0x79, 0xca, 0x5b, 0x35, 0x76, 0x1f, 0x99, 0xb7, 0xcd, 0x22,
0x3f, 0x18, 0x24, 0x0e, 0x00, 0xdb, 0x2b, 0x7d, 0xae, 0x5e, 0x48, 0xec, 0x42, 0xa4, 0x79, 0xb1,
0xef, 0x33, 0x75, 0xb4, 0x0b, 0x8b, 0xab, 0xec, 0xaa, 0x8a, 0x3e, 0x5f, 0xc7, 0x13, 0xdd, 0x04,
0x9d, 0x35, 0x56, 0x45, 0x3f, 0x6b, 0x90, 0x4c, 0xb2, 0x11, 0xf9, 0x9b, 0x3a, 0x9b, 0x21, 0xc4,
0xd6, 0xa4, 0x01, 0x7e, 0x9b, 0x8b, 0xc2, 0xbb, 0xcb, 0x29, 0x48, 0xd3, 0xd8, 0xe9, 0xef, 0x0a,
0x05, 0x95, 0xce, 0x47, 0x7f, 0x5f, 0xc7, 0x90, 0xed, 0x87, 0x31, 0xec, 0x87, 0xfe, 0x1d, 0xfa,
0xb5, 0x26, 0x86, 0xcc, 0x78, 0xb4, 0x23, 0x02, 0xb0, 0x27, 0xfc, 0xf5, 0x26, 0x26, 0x0c, 0xe6,
0xa1, 0x4d, 0x98, 0x6f, 0x98, 0x75, 0xd6, 0xbd, 0x7b, 0x5d, 0xfa, 0x4d, 0xbc, 0x43, 0x91, 0x6c,
0xbd, 0xbf, 0xb7, 0x4b, 0xbf, 0xd5, 0x44, 0x55, 0xab, 0x51, 0x24, 0x7c, 0xae, 0x8b, 0x6a, 0xf8,
0x76, 0x13, 0xcb, 0xa9, 0xa2, 0x3d, 0x3b, 0xb5, 0x17, 0x9b, 0x18, 0xfb, 0x0c, 0x37, 0xc9, 0xd6,
0xc5, 0xa6, 0xf8, 0x1d, 0x23, 0x15, 0x5f, 0x74, 0x68, 0xc9, 0xbe, 0xa6, 0xdf, 0x35, 0x7c, 0x17,
0xaf, 0x05, 0xf4, 0x0f, 0xad, 0x2c, 0xbf, 0x2a, 0xd8, 0x2b, 0x2d, 0x5b, 0x1f, 0xe7, 0xef, 0x01,
0xf4, 0x8f, 0x06, 0xbe, 0x78, 0x77, 0xa0, 0x7f, 0x6a, 0xa1, 0x61, 0xd5, 0xf1, 0x8f, 0x97, 0x60,
0x45, 0xff, 0xdc, 0x42, 0x0b, 0xca, 0x41, 0x4f, 0xbf, 0xe7, 0x62, 0xb0, 0xf2, 0x11, 0x4f, 0xbf,
0xef, 0xa2, 0x9b, 0x17, 0x86, 0x3b, 0xfd, 0x81, 0x6b, 0x8e, 0xa3, 0x18, 0xeb, 0xf4, 0x87, 0x15,
0x00, 0xb9, 0xe8, 0x8f, 0x5c, 0xd3, 0x81, 0xce, 0x8d, 0x72, 0xfa, 0x63, 0x17, 0x6d, 0xbb, 0x38,
0xc4, 0xe9, 0x4f, 0x5c, 0x7b, 0xdc, 0xc5, 0xf8, 0xa6, 0x3f, 0x75, 0xb1, 0x02, 0x2e, 0x1f, 0xdc,
0xf4, 0x25, 0xa3, 0xab, 0x1c, 0xd9, 0xf4, 0x65, 0x77, 0xa9, 0x4d, 0x1a, 0x5d, 0x15, 0x99, 0xb9,
0xd1, 0x20, 0xb5, 0xae, 0x8a, 0xe8, 0x18, 0xb6, 0xd9, 0x35, 0x21, 0xa2, 0xf5, 0xb3, 0x54, 0x3e,
0xf5, 0x01, 0xea, 0x2c, 0xad, 0x91, 0x99, 0x8e, 0x88, 0x53, 0x5e, 0x94, 0x9b, 0x19, 0x15, 0x76,
0xc6, 0x40, 0x60, 0x53, 0x65, 0x0c, 0x7b, 0xf5, 0xfa, 0x19, 0xf8, 0x43, 0x33, 0xd1, 0x1c, 0x5c,
0xe2, 0x26, 0x0c, 0x72, 0x40, 0xc7, 0x97, 0x9e, 0x21, 0xb4, 0x23, 0x12, 0x15, 0x2a, 0x0d, 0x89,
0x3f, 0xda, 0x82, 0x53, 0x88, 0xcc, 0xdc, 0xd4, 0x52, 0x24, 0x03, 0x3a, 0x66, 0xde, 0x09, 0x60,
0xee, 0xfb, 0x76, 0xba, 0xae, 0xe1, 0x5d, 0xc0, 0x3c, 0x06, 0xa6, 0x09, 0x59, 0x3f, 0x85, 0x44,
0x0f, 0x79, 0x14, 0x8d, 0x68, 0x0d, 0xd7, 0x9d, 0xa1, 0xd2, 0x22, 0x0e, 0x3f, 0x6d, 0xe6, 0xf7,
0x57, 0x1d, 0xd2, 0xb2, 0xa3, 0xb4, 0x30, 0xcd, 0x2e, 0xfb, 0x90, 0x04, 0xa1, 0x11, 0x8e, 0x77,
0x59, 0x03, 0x65, 0x43, 0xdf, 0x29, 0x99, 0xf6, 0x34, 0x97, 0x3a, 0x7f, 0x74, 0x58, 0xa8, 0x2b,
0xee, 0x26, 0x91, 0xe0, 0x81, 0x99, 0xe7, 0xc5, 0xd6, 0x3e, 0x97, 0xca, 0x0c, 0x75, 0xbc, 0xea,
0x67, 0xf2, 0xa5, 0xf1, 0x27, 0xa0, 0x57, 0x4a, 0xb0, 0xf4, 0x79, 0x02, 0x87, 0xa7, 0x05, 0x4d,
0xb2, 0xe7, 0x99, 0x4e, 0x96, 0xee, 0x27, 0x6e, 0xf5, 0x19, 0x67, 0x3c, 0x2a, 0xc7, 0xe0, 0xd8,
0xd2, 0x2b, 0x0e, 0x0e, 0x96, 0x8b, 0xef, 0x30, 0x5a, 0x79, 0x36, 0xae, 0x46, 0x91, 0x7d, 0x4e,
0x15, 0x88, 0x4d, 0x44, 0xeb, 0x59, 0x01, 0x62, 0x32, 0x52, 0x6c, 0xf2, 0xd3, 0x95, 0x9d, 0x1a,
0x24, 0xad, 0x9d, 0x63, 0xf3, 0x80, 0xa3, 0x63, 0x55, 0x08, 0xeb, 0xd9, 0x3e, 0xa2, 0x2a, 0x5c,
0x66, 0x2c, 0xd1, 0x89, 0x73, 0x68, 0x96, 0x18, 0xb4, 0x71, 0xce, 0x9a, 0x6c, 0x5e, 0x4c, 0x9e,
0x03, 0xb3, 0xb9, 0xd1, 0xfc, 0x88, 0x20, 0x57, 0x8b, 0xf7, 0xe9, 0x21, 0x9c, 0xe9, 0x43, 0x71,
0xf4, 0x2c, 0x7b, 0x60, 0xd9, 0xfe, 0x5f, 0x5a, 0xce, 0xff, 0x2f, 0x2d, 0x6f, 0x83, 0x52, 0x7c,
0x00, 0xbb, 0xa9, 0x19, 0xac, 0xf3, 0x7f, 0x6d, 0x98, 0x07, 0xf8, 0xe5, 0xcf, 0xe2, 0xea, 0x83,
0xda, 0x9b, 0x49, 0x2b, 0xab, 0xdd, 0xa3, 0x67, 0xd7, 0x9e, 0x26, 0xd3, 0xa1, 0xc8, 0xf7, 0x0d,
0x64, 0xea, 0xaf, 0xb5, 0x3a, 0x66, 0x5f, 0x1f, 0x65, 0xf4, 0x9d, 0x8f, 0x3f, 0x3e, 0x08, 0xf5,
0xc9, 0xf0, 0x08, 0xa5, 0xad, 0x58, 0xb6, 0x47, 0x43, 0x91, 0x7d, 0xad, 0x84, 0x89, 0xc6, 0xa2,
0x8f, 0xec, 0x9f, 0xaf, 0x15, 0xab, 0x31, 0x3d, 0xfa, 0xa2, 0xe3, 0x1c, 0x4d, 0x18, 0xe8, 0xf1,
0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xd2, 0xcb, 0x48, 0x3f, 0x13, 0x00, 0x00,
0xf5, 0x57, 0xa9, 0x5b, 0x6a, 0x75, 0x76, 0x49, 0xca, 0x49, 0xcd, 0x22, 0x8f, 0xc7, 0xb6, 0xdc,
0x7f, 0xdb, 0xff, 0xa1, 0xc1, 0x1a, 0xb0, 0x23, 0x80, 0x20, 0xc2, 0x44, 0x48, 0xdd, 0x92, 0xa6,
0xc3, 0x5a, 0x9a, 0x92, 0x64, 0x13, 0x44, 0x80, 0x22, 0x55, 0xf5, 0xd4, 0x2a, 0x4f, 0x55, 0x65,
0x91, 0x99, 0xad, 0x51, 0x73, 0x32, 0xe6, 0x0b, 0x80, 0xf9, 0x02, 0x7c, 0x00, 0xf6, 0xc5, 0x70,
0x64, 0xc7, 0x66, 0xbb, 0x70, 0x61, 0x33, 0x70, 0x84, 0x3b, 0xab, 0x57, 0xe2, 0x65, 0xd6, 0x26,
0x8d, 0x0c, 0x07, 0x6e, 0x95, 0xbf, 0xf7, 0xf2, 0x6d, 0xf9, 0x96, 0xcc, 0x22, 0xae, 0x2f, 0xe2,
0x58, 0x24, 0xcb, 0xa9, 0x14, 0x5a, 0xb0, 0x85, 0x38, 0x8c, 0x4e, 0x46, 0xca, 0xae, 0x96, 0x2d,
0xe9, 0xfa, 0xd2, 0x50, 0x88, 0x61, 0x04, 0xb7, 0x0c, 0x78, 0x38, 0x3a, 0xba, 0x15, 0x80, 0xf2,
0x65, 0x98, 0x6a, 0x21, 0x2d, 0x63, 0xfb, 0x80, 0x4c, 0xef, 0x6a, 0xae, 0x47, 0x8a, 0x3d, 0x45,
0x08, 0x48, 0x29, 0xe4, 0x81, 0x2f, 0x02, 0x58, 0x74, 0x96, 0x9c, 0x9b, 0x73, 0x4f, 0x3c, 0xb8,
0x7c, 0x81, 0xd4, 0xe5, 0x35, 0x64, 0xeb, 0x8a, 0x00, 0xbc, 0x26, 0xe4, 0x9f, 0xec, 0x2a, 0x99,
0x96, 0xc0, 0x95, 0x48, 0x16, 0x27, 0x97, 0x9c, 0x9b, 0x4d, 0x2f, 0x5b, 0xb5, 0xdf, 0x4f, 0xdc,
0xa7, 0x61, 0xfc, 0x0c, 0x8f, 0x46, 0x30, 0xe0, 0xa1, 0x64, 0x94, 0xd4, 0xee, 0xc0, 0xd8, 0xc8,
0x6f, 0x7a, 0xf8, 0xc9, 0x2e, 0x93, 0xa9, 0x13, 0x24, 0x67, 0x1b, 0xed, 0xa2, 0xfd, 0x24, 0x69,
0x3d, 0x0d, 0xe3, 0x1e, 0xd7, 0xfc, 0x1d, 0xb6, 0x31, 0x52, 0x0f, 0xb8, 0xe6, 0x66, 0x97, 0xeb,
0x99, 0xef, 0xf6, 0x0d, 0x52, 0x5f, 0x8d, 0xc4, 0x61, 0x29, 0xd2, 0x31, 0xc4, 0x4c, 0xe4, 0x09,
0xa1, 0x83, 0x88, 0xfb, 0x70, 0x2c, 0xa2, 0x00, 0xa4, 0x31, 0x09, 0xe5, 0x6a, 0x3e, 0xcc, 0xe5,
0x6a, 0x3e, 0x64, 0x1f, 0x24, 0x75, 0x3d, 0x4e, 0xad, 0x35, 0x73, 0x4f, 0x3c, 0x72, 0x61, 0x04,
0x2a, 0x62, 0xf6, 0xc6, 0x29, 0x78, 0x66, 0x07, 0x86, 0xc0, 0x28, 0x52, 0x8b, 0xb5, 0xa5, 0xda,
0x4d, 0xd7, 0xcb, 0x56, 0xed, 0x8f, 0x9f, 0xd1, 0xbb, 0x21, 0xc5, 0x28, 0x65, 0x7d, 0xe2, 0xa6,
0x25, 0xa6, 0x16, 0x9d, 0xa5, 0xda, 0xcd, 0xd6, 0x13, 0x8f, 0xfe, 0x37, 0x6d, 0xc6, 0x68, 0xef,
0xcc, 0xd6, 0xf6, 0xe3, 0xa4, 0xb1, 0x12, 0x04, 0x12, 0x94, 0x62, 0x73, 0x64, 0x32, 0x4c, 0x33,
0x67, 0x26, 0xc3, 0x14, 0x63, 0x94, 0x0a, 0xa9, 0x8d, 0x2f, 0x35, 0xcf, 0x7c, 0xb7, 0x5f, 0x74,
0x48, 0x63, 0x4b, 0x0d, 0x57, 0xb9, 0x02, 0xf6, 0x01, 0x32, 0x13, 0xab, 0xe1, 0x81, 0xf1, 0xd7,
0x9e, 0xf8, 0x8d, 0x0b, 0x2d, 0xd8, 0x52, 0x43, 0xe3, 0x67, 0x23, 0xb6, 0x1f, 0x18, 0xe0, 0x58,
0x0d, 0xfb, 0xbd, 0x4c, 0xb2, 0x5d, 0xb0, 0x1b, 0xa4, 0xa9, 0xc3, 0x18, 0x94, 0xe6, 0x71, 0xba,
0x58, 0x5b, 0x72, 0x6e, 0xd6, 0xbd, 0x12, 0x60, 0xd7, 0xc9, 0x8c, 0x12, 0x23, 0xe9, 0x43, 0xbf,
0xb7, 0x58, 0x37, 0xdb, 0x8a, 0x75, 0xfb, 0x29, 0xd2, 0xdc, 0x52, 0xc3, 0xdb, 0xc0, 0x03, 0x90,
0xec, 0xbd, 0xa4, 0x7e, 0xc8, 0x95, 0xb5, 0xa8, 0xf5, 0xce, 0x16, 0xa1, 0x07, 0x9e, 0xe1, 0x6c,
0x7f, 0x82, 0xb8, 0xbd, 0xad, 0xcd, 0xff, 0x41, 0x02, 0x9a, 0xae, 0x8e, 0xb9, 0x0c, 0xb6, 0x79,
0x9c, 0x27, 0x62, 0x09, 0xb4, 0x5f, 0x75, 0x88, 0x3b, 0x90, 0xe1, 0x49, 0x18, 0xc1, 0x10, 0xd6,
0x4e, 0x35, 0x5b, 0x27, 0xb3, 0x12, 0xac, 0xf5, 0xd5, 0xe8, 0x3d, 0x7c, 0xa1, 0x26, 0x2f, 0xe3,
0x34, 0x21, 0x74, 0x65, 0x65, 0xc5, 0xf6, 0x09, 0x2b, 0xe4, 0xa4, 0xb9, 0x82, 0x2c, 0xf5, 0x1e,
0xfb, 0x8f, 0xc2, 0x0a, 0x73, 0xbc, 0x4b, 0xf2, 0x3c, 0xc4, 0x96, 0xc9, 0x42, 0x21, 0x36, 0xe1,
0x31, 0x1c, 0x84, 0x49, 0x00, 0xa7, 0xe6, 0x48, 0xa6, 0x4a, 0x7e, 0x74, 0xad, 0x8f, 0x84, 0xce,
0xaf, 0x66, 0x48, 0xb3, 0xa8, 0x6a, 0xd6, 0x22, 0x8d, 0xdd, 0x91, 0xef, 0x83, 0x52, 0x74, 0x82,
0x2d, 0x90, 0xf9, 0xfd, 0x04, 0x4e, 0x53, 0xf0, 0x35, 0x04, 0x86, 0x87, 0x3a, 0xec, 0x12, 0x99,
0xed, 0x8a, 0x24, 0x01, 0x5f, 0xaf, 0xf3, 0x30, 0x82, 0x80, 0x4e, 0xb2, 0xcb, 0x84, 0x0e, 0x40,
0xc6, 0xa1, 0x52, 0xa1, 0x48, 0x7a, 0x90, 0x84, 0x10, 0xd0, 0x1a, 0xbb, 0x46, 0x16, 0xba, 0x22,
0x8a, 0xc0, 0xd7, 0xa1, 0x48, 0xb6, 0x85, 0x5e, 0x3b, 0x0d, 0x95, 0x56, 0xb4, 0x8e, 0x62, 0xfb,
0x51, 0x04, 0x43, 0x1e, 0xad, 0xc8, 0xe1, 0x28, 0x86, 0x44, 0xd3, 0x29, 0x94, 0x91, 0x81, 0xbd,
0x30, 0x86, 0x04, 0x25, 0xd1, 0x46, 0x05, 0x35, 0xc6, 0x62, 0xdc, 0xe8, 0x0c, 0xbb, 0x8f, 0x5c,
0xc9, 0xd0, 0x8a, 0x02, 0x1e, 0x03, 0x6d, 0xb2, 0x79, 0xd2, 0xca, 0x48, 0x7b, 0x3b, 0x83, 0xa7,
0x29, 0xa9, 0x48, 0xf0, 0xc4, 0x5d, 0x0f, 0x7c, 0x21, 0x03, 0xda, 0xaa, 0x98, 0xf0, 0x0c, 0xf8,
0x5a, 0xc8, 0x7e, 0x8f, 0xba, 0x68, 0x70, 0x06, 0xee, 0x02, 0x97, 0xfe, 0xb1, 0x07, 0x6a, 0x14,
0x69, 0x3a, 0xcb, 0x28, 0x71, 0xd7, 0xc3, 0x08, 0xb6, 0x85, 0x5e, 0x17, 0xa3, 0x24, 0xa0, 0x73,
0x6c, 0x8e, 0x90, 0x2d, 0xd0, 0x3c, 0x8b, 0xc0, 0x3c, 0xaa, 0xed, 0x72, 0xff, 0x18, 0x32, 0x80,
0xb2, 0xab, 0x84, 0x75, 0x79, 0x92, 0x08, 0xdd, 0x95, 0xc0, 0x35, 0xac, 0x9b, 0x7a, 0xa5, 0x97,
0xd0, 0x9c, 0x33, 0x78, 0x18, 0x01, 0x65, 0x25, 0x77, 0x0f, 0x22, 0x28, 0xb8, 0x17, 0x4a, 0xee,
0x0c, 0x47, 0xee, 0xcb, 0x68, 0xfc, 0xea, 0x28, 0x8c, 0x02, 0x13, 0x12, 0x7b, 0x2c, 0x57, 0xd0,
0xc6, 0xcc, 0xf8, 0xed, 0xcd, 0xfe, 0xee, 0x1e, 0xbd, 0xca, 0xae, 0x90, 0x4b, 0x19, 0xb2, 0x05,
0x5a, 0x86, 0xbe, 0x09, 0xde, 0x35, 0x34, 0x75, 0x67, 0xa4, 0x77, 0x8e, 0xb6, 0x20, 0x16, 0x72,
0x4c, 0x17, 0xf1, 0x40, 0x8d, 0xa4, 0xfc, 0x88, 0xe8, 0x7d, 0xa8, 0x61, 0x2d, 0x4e, 0xf5, 0xb8,
0x0c, 0x2f, 0xbd, 0xce, 0xee, 0x27, 0xd7, 0xf6, 0xd3, 0x80, 0x6b, 0xe8, 0xc7, 0xd8, 0x4c, 0xf6,
0xb8, 0xba, 0x83, 0xee, 0x8e, 0x24, 0xd0, 0xfb, 0xd9, 0x75, 0x72, 0xf5, 0xec, 0x59, 0x14, 0xc1,
0xba, 0x81, 0x1b, 0xad, 0xb7, 0x5d, 0x09, 0x01, 0x24, 0x3a, 0xe4, 0x51, 0xbe, 0xf1, 0x81, 0x52,
0xea, 0xbd, 0xc4, 0x07, 0x91, 0x68, 0x3d, 0xbf, 0x97, 0xf8, 0x10, 0x5b, 0x24, 0x97, 0x37, 0x40,
0xdf, 0x4b, 0x59, 0x42, 0xca, 0x66, 0xa8, 0x0c, 0x69, 0x5f, 0x81, 0x54, 0x39, 0xe5, 0x61, 0xc6,
0xc8, 0xdc, 0x06, 0x68, 0x04, 0x73, 0xac, 0x8d, 0x71, 0xb2, 0xe6, 0x79, 0x22, 0x82, 0x1c, 0xfe,
0x3f, 0x8c, 0x41, 0x4f, 0x8a, 0xb4, 0x0a, 0x3e, 0x82, 0x6e, 0xee, 0xa4, 0x20, 0xb9, 0x06, 0x94,
0x51, 0xa5, 0x3d, 0x8a, 0x72, 0x76, 0x01, 0x23, 0x50, 0x85, 0x1f, 0x2b, 0xe1, 0xaa, 0xd6, 0xff,
0xc7, 0x1c, 0xce, 0xb8, 0xb3, 0x8a, 0xcc, 0x49, 0x37, 0xd1, 0xeb, 0x4c, 0x49, 0x51, 0xd5, 0x39,
0xf1, 0x5d, 0x98, 0x2a, 0x76, 0xdf, 0x86, 0xe4, 0x89, 0xce, 0xf1, 0x0e, 0x7b, 0x98, 0x3c, 0xe0,
0xc1, 0x91, 0x04, 0x75, 0x3c, 0x10, 0x51, 0xe8, 0x8f, 0xfb, 0xc9, 0x91, 0x28, 0x52, 0x12, 0x59,
0xde, 0x8d, 0x96, 0x60, 0x58, 0x2c, 0x3d, 0x87, 0xdf, 0x83, 0x31, 0xd9, 0x16, 0x7a, 0x17, 0x1b,
0xde, 0xa6, 0x69, 0xa1, 0xf4, 0x71, 0xd4, 0xb2, 0x2d, 0x3c, 0x48, 0xa3, 0xd0, 0xe7, 0x2b, 0x27,
0x3c, 0x8c, 0xf8, 0x61, 0x04, 0x74, 0x19, 0x83, 0xb2, 0x0b, 0x43, 0x2c, 0xd9, 0xe2, 0x7c, 0x6f,
0x31, 0x46, 0x66, 0x7b, 0x3d, 0x0f, 0x3e, 0x39, 0x02, 0xa5, 0x3d, 0xee, 0x03, 0xfd, 0x73, 0xa3,
0x33, 0x24, 0xc4, 0x24, 0x15, 0x5e, 0x30, 0x00, 0x55, 0x94, 0xab, 0x6d, 0x91, 0x00, 0x9d, 0x60,
0x2e, 0x99, 0xd9, 0x4f, 0x42, 0xa5, 0x46, 0x10, 0x50, 0x07, 0x0b, 0xaa, 0x9f, 0x0c, 0xa4, 0x18,
0xe2, 0x2c, 0xa3, 0x93, 0x48, 0x5d, 0x0f, 0x93, 0x50, 0x1d, 0x9b, 0x56, 0x42, 0xc8, 0x74, 0x56,
0x59, 0x75, 0x36, 0x4b, 0x9a, 0x2b, 0x87, 0x3c, 0x09, 0x44, 0x02, 0x01, 0x9d, 0xea, 0xbc, 0xe0,
0x10, 0x37, 0x33, 0xc9, 0xea, 0xba, 0x4c, 0x68, 0x75, 0x5d, 0x6a, 0x2b, 0xf2, 0xdb, 0xc1, 0x2e,
0xb7, 0x21, 0xc5, 0xdd, 0x30, 0x19, 0xd2, 0x49, 0x14, 0xbe, 0x0b, 0x3c, 0x32, 0x8a, 0x5a, 0xa4,
0xb1, 0x1e, 0x8d, 0x8c, 0xd6, 0xba, 0xb1, 0x01, 0x17, 0xc8, 0x36, 0x85, 0x24, 0xcc, 0x87, 0x14,
0x02, 0x3a, 0x8d, 0x46, 0xd8, 0x2a, 0x40, 0x5a, 0xa3, 0xf3, 0x61, 0x32, 0x7f, 0xee, 0x5a, 0xc0,
0x66, 0x48, 0x3d, 0x53, 0x4d, 0x89, 0xbb, 0x1a, 0x26, 0x5c, 0x8e, 0x6d, 0xab, 0xa1, 0x01, 0x96,
0xe0, 0x7a, 0x24, 0xb8, 0xce, 0x00, 0xe8, 0xbc, 0xe4, 0x9a, 0xb9, 0x6c, 0x36, 0xce, 0x92, 0xe6,
0x7e, 0x12, 0xc0, 0x51, 0x88, 0xfe, 0x4d, 0x98, 0x16, 0x60, 0x8b, 0xa7, 0xac, 0xc5, 0x00, 0x03,
0x8a, 0xc6, 0x54, 0x30, 0xc0, 0x3a, 0xbe, 0xcd, 0x55, 0x05, 0x3a, 0xc2, 0x63, 0xec, 0x99, 0x5b,
0xdf, 0x61, 0x75, 0xfb, 0xd0, 0x1c, 0xe3, 0xb1, 0xb8, 0x5b, 0x62, 0x8a, 0x1e, 0xa3, 0xa6, 0x0d,
0xd0, 0xbb, 0x63, 0xa5, 0x21, 0xee, 0x8a, 0xe4, 0x28, 0x1c, 0x2a, 0x1a, 0xa2, 0xa6, 0x4d, 0xc1,
0x83, 0xca, 0xf6, 0xe7, 0x30, 0x91, 0x3c, 0x88, 0x80, 0xab, 0xaa, 0xd4, 0x3b, 0xa6, 0x09, 0x1a,
0x53, 0x57, 0xa2, 0x90, 0x2b, 0x1a, 0xa1, 0x2b, 0x68, 0xa5, 0x5d, 0xc6, 0x78, 0xc6, 0x2b, 0x91,
0x06, 0x69, 0xd7, 0x09, 0xbb, 0x4c, 0xe6, 0x2d, 0xff, 0x80, 0x4b, 0x1d, 0x1a, 0x21, 0x2f, 0x3b,
0x26, 0x9b, 0xa4, 0x48, 0x4b, 0xec, 0x15, 0x9c, 0x39, 0xee, 0x6d, 0xae, 0x4a, 0xe8, 0xa7, 0x0e,
0xbb, 0x4a, 0x2e, 0xe5, 0xae, 0x95, 0xf8, 0xcf, 0x1c, 0xb6, 0x40, 0xe6, 0xd0, 0xb5, 0x02, 0x53,
0xf4, 0xe7, 0x06, 0x44, 0x27, 0x2a, 0xe0, 0x2f, 0x8c, 0x84, 0xcc, 0x8b, 0x0a, 0xfe, 0x4b, 0xa3,
0x0c, 0x25, 0x64, 0x49, 0xa4, 0xe8, 0x6b, 0x0e, 0x5a, 0x9a, 0x2b, 0xcb, 0x60, 0xfa, 0xba, 0x61,
0x44, 0xa9, 0x05, 0xe3, 0x1b, 0x86, 0x31, 0x93, 0x59, 0xa0, 0x6f, 0x1a, 0xf4, 0x36, 0x66, 0xec,
0xd1, 0x51, 0x81, 0xbe, 0xe5, 0xb0, 0x45, 0xb2, 0x80, 0xdb, 0x57, 0x79, 0xc4, 0x13, 0xbf, 0xe4,
0x7f, 0xdb, 0x61, 0x57, 0x08, 0x3d, 0xa7, 0x4e, 0xd1, 0xe7, 0x27, 0x19, 0xcd, 0xe3, 0x6b, 0x6a,
0x89, 0x7e, 0x71, 0xd2, 0xc4, 0x2a, 0x63, 0xb4, 0xd8, 0x97, 0x26, 0xd9, 0x9c, 0x0d, 0xba, 0x5d,
0x7f, 0x79, 0x92, 0xb5, 0xc8, 0x74, 0x3f, 0x51, 0x20, 0x35, 0xfd, 0x2c, 0xe6, 0xf7, 0xb4, 0x6d,
0xa8, 0xf4, 0x73, 0x58, 0x55, 0x53, 0x26, 0xbf, 0xe9, 0x8b, 0x38, 0xac, 0x99, 0x07, 0x0a, 0x92,
0xa0, 0x52, 0x3b, 0x8a, 0x7e, 0xde, 0xec, 0xb0, 0xd3, 0x90, 0xfe, 0xb5, 0x66, 0x42, 0x53, 0x1d,
0x8d, 0x7f, 0xab, 0xa1, 0x09, 0x1b, 0xa0, 0xcb, 0xea, 0xa6, 0x7f, 0xaf, 0xb1, 0xeb, 0xe4, 0x4a,
0x8e, 0x99, 0x41, 0x55, 0xd4, 0xf5, 0x3f, 0x6a, 0xec, 0x06, 0xb9, 0x86, 0x5d, 0xbb, 0xc8, 0x1b,
0xdc, 0x14, 0x2a, 0x1d, 0xfa, 0x8a, 0xfe, 0xb3, 0xc6, 0xee, 0x27, 0x57, 0x37, 0x40, 0x17, 0xe7,
0x51, 0x21, 0xfe, 0xab, 0xc6, 0x66, 0xc9, 0x8c, 0x87, 0x93, 0x0c, 0x4e, 0x80, 0xbe, 0x56, 0xc3,
0x43, 0xcd, 0x97, 0x99, 0x39, 0xaf, 0xd7, 0x30, 0xd4, 0xcf, 0x72, 0xed, 0x1f, 0xf7, 0xe2, 0xee,
0x31, 0x4f, 0x12, 0x88, 0x14, 0x7d, 0xa3, 0x86, 0x01, 0xf5, 0x20, 0x16, 0x27, 0x50, 0x81, 0xdf,
0x34, 0x4e, 0x1b, 0xe6, 0x8f, 0x8c, 0x40, 0x8e, 0x0b, 0xc2, 0x5b, 0x35, 0x3c, 0x1a, 0xcb, 0x7f,
0x96, 0xf2, 0x76, 0x8d, 0x3d, 0x40, 0x16, 0x6d, 0xb3, 0xc8, 0x0f, 0x06, 0x89, 0x43, 0xc0, 0x6e,
0x4b, 0x9f, 0xaf, 0x17, 0x12, 0x7b, 0x10, 0x69, 0x5e, 0xec, 0xfb, 0x74, 0x1d, 0xed, 0xc2, 0xe2,
0x2a, 0x9b, 0xac, 0xa2, 0x2f, 0xd4, 0xf1, 0x44, 0x37, 0x40, 0x67, 0x7d, 0x56, 0xd1, 0xcf, 0x18,
0x24, 0x93, 0x6c, 0x44, 0xfe, 0xba, 0xce, 0xe6, 0x09, 0xb1, 0x35, 0x69, 0x80, 0xdf, 0xe4, 0xa2,
0xf0, 0x2a, 0x73, 0x02, 0xd2, 0xf4, 0x79, 0xfa, 0xdb, 0x42, 0x41, 0xa5, 0xf3, 0xd1, 0xdf, 0x61,
0xb3, 0x9c, 0xd9, 0x0b, 0x63, 0xd8, 0x0b, 0xfd, 0x3b, 0xf4, 0xab, 0x4d, 0x0c, 0x99, 0xf1, 0x68,
0x5b, 0x04, 0x60, 0x4f, 0xf8, 0x6b, 0x4d, 0x4c, 0x18, 0xcc, 0x43, 0x9b, 0x30, 0x5f, 0x37, 0xeb,
0xac, 0x99, 0xf7, 0x7b, 0xf4, 0x1b, 0x78, 0xa5, 0x22, 0xd9, 0x7a, 0x6f, 0x77, 0x87, 0x7e, 0xb3,
0x89, 0xaa, 0x56, 0xa2, 0x48, 0xf8, 0x5c, 0x17, 0xd5, 0xf0, 0xad, 0x26, 0x96, 0x53, 0x45, 0x7b,
0x76, 0x6a, 0x2f, 0x35, 0x31, 0xf6, 0x19, 0x6e, 0x92, 0xad, 0x87, 0x4d, 0xf1, 0xdb, 0x46, 0x2a,
0x3e, 0xf0, 0xd0, 0x92, 0x3d, 0x4d, 0xbf, 0x63, 0xf8, 0xce, 0xdf, 0x12, 0xe8, 0xef, 0x5b, 0x59,
0x7e, 0x55, 0xb0, 0x57, 0x5b, 0xb6, 0x3e, 0xce, 0x5e, 0x0b, 0xe8, 0x1f, 0x0c, 0x7c, 0xfe, 0x2a,
0x41, 0xff, 0xd8, 0x42, 0xc3, 0xaa, 0xb7, 0x01, 0xbc, 0x13, 0x2b, 0xfa, 0xa7, 0x16, 0x5a, 0x50,
0xce, 0x7d, 0xfa, 0x5d, 0x17, 0x83, 0x95, 0x4f, 0x7c, 0xfa, 0x3d, 0x17, 0xdd, 0x3c, 0x37, 0xeb,
0xe9, 0xf7, 0x5d, 0x73, 0x1c, 0xc5, 0x94, 0xa7, 0x3f, 0xa8, 0x00, 0xc8, 0x45, 0x7f, 0xe8, 0x9a,
0x0e, 0x74, 0x66, 0xb2, 0xd3, 0x1f, 0xb9, 0x68, 0xdb, 0xf9, 0x99, 0x4e, 0x7f, 0xec, 0xda, 0xe3,
0x2e, 0xa6, 0x39, 0xfd, 0x89, 0x8b, 0x15, 0x70, 0xf1, 0x1c, 0xa7, 0x2f, 0x1b, 0x5d, 0xe5, 0x04,
0xa7, 0xaf, 0xb8, 0x9d, 0x36, 0x69, 0xf4, 0x54, 0x64, 0xe6, 0x46, 0x83, 0xd4, 0x7a, 0x2a, 0xa2,
0x13, 0xd8, 0x66, 0x57, 0x85, 0x88, 0xd6, 0x4e, 0x53, 0xf9, 0xcc, 0xfb, 0xa8, 0xd3, 0x59, 0x25,
0xf3, 0x5d, 0x11, 0xa7, 0xbc, 0x28, 0x37, 0x33, 0x2a, 0xec, 0x8c, 0x81, 0xc0, 0xa6, 0xca, 0x04,
0xf6, 0xea, 0xb5, 0x53, 0xf0, 0x47, 0x66, 0xa2, 0x39, 0xb8, 0xc4, 0x4d, 0x18, 0xe4, 0x80, 0x4e,
0x76, 0x3e, 0x4a, 0x68, 0x57, 0x24, 0x2a, 0x54, 0x1a, 0x12, 0x7f, 0xbc, 0x09, 0x27, 0x10, 0x99,
0xb9, 0xa9, 0xa5, 0x48, 0x86, 0x74, 0xc2, 0x3c, 0x1b, 0xc0, 0x5c, 0xff, 0xed, 0x74, 0x5d, 0xc5,
0xab, 0x81, 0x79, 0x1b, 0xcc, 0x11, 0xb2, 0x76, 0x02, 0x89, 0x1e, 0xf1, 0x28, 0x1a, 0xd3, 0x1a,
0xae, 0xbb, 0x23, 0xa5, 0x45, 0x1c, 0x7e, 0x0a, 0x87, 0x6c, 0xe7, 0x2b, 0x0e, 0x69, 0xd9, 0x51,
0x5a, 0x98, 0x66, 0x97, 0x03, 0x48, 0x82, 0xd0, 0x08, 0xc7, 0xab, 0xad, 0x81, 0xb2, 0x3b, 0x80,
0x53, 0x32, 0xed, 0x6a, 0x2e, 0x75, 0xfe, 0x06, 0xb1, 0x50, 0x4f, 0xdc, 0x4d, 0x22, 0xc1, 0x03,
0x33, 0xcf, 0x8b, 0xad, 0x03, 0x2e, 0x95, 0x19, 0xea, 0x78, 0xf3, 0xcf, 0xe4, 0x4b, 0xe3, 0x4f,
0x40, 0xa7, 0x4a, 0xb0, 0xf4, 0x79, 0x1a, 0x87, 0xa7, 0x05, 0x4d, 0xb2, 0xe7, 0x99, 0x4e, 0x3a,
0x0f, 0x12, 0xb7, 0xfa, 0xaa, 0x33, 0x1e, 0x95, 0x63, 0x70, 0xa2, 0xf3, 0xaa, 0x83, 0x83, 0xe5,
0xfc, 0xb3, 0x8c, 0x56, 0x5e, 0x91, 0x2b, 0x51, 0x64, 0x5f, 0x57, 0x05, 0x62, 0x13, 0xd1, 0x7a,
0x56, 0x80, 0x98, 0x8c, 0x14, 0x9b, 0xfc, 0x5c, 0x65, 0xa7, 0x06, 0x49, 0x6b, 0x67, 0xd8, 0x3c,
0xe0, 0xe8, 0x58, 0x15, 0xc2, 0x7a, 0xb6, 0x6f, 0xaa, 0x0a, 0x97, 0x19, 0x4b, 0x74, 0xfa, 0x0c,
0x9a, 0x25, 0x06, 0x6d, 0x9c, 0xb1, 0x26, 0x9b, 0x17, 0x33, 0x67, 0xc0, 0x6c, 0x6e, 0x34, 0x3f,
0x24, 0xc8, 0xa5, 0xe2, 0xb9, 0x7a, 0x00, 0xa7, 0xfa, 0x40, 0x1c, 0x3e, 0xc7, 0x1e, 0x5a, 0xb6,
0xbf, 0x9b, 0x96, 0xf3, 0xdf, 0x4d, 0xcb, 0x5b, 0xa0, 0x14, 0x1f, 0xc2, 0x4e, 0x6a, 0x06, 0xeb,
0xe2, 0x5f, 0x1a, 0xe6, 0x3d, 0x7e, 0xf1, 0x2b, 0xb9, 0xfa, 0xbe, 0xf6, 0xe6, 0xd3, 0xca, 0x6a,
0xe7, 0xf0, 0xb9, 0xd5, 0x67, 0xc9, 0x5c, 0x28, 0xf2, 0x7d, 0x43, 0x99, 0xfa, 0xab, 0xad, 0xae,
0xd9, 0x37, 0x40, 0x19, 0x03, 0xe7, 0x63, 0x4f, 0x0e, 0x43, 0x7d, 0x3c, 0x3a, 0x44, 0x69, 0xb7,
0x2c, 0xdb, 0xe3, 0xa1, 0xc8, 0xbe, 0x6e, 0x85, 0x89, 0xc6, 0xa2, 0x8f, 0xec, 0x8f, 0xb0, 0x5b,
0x56, 0x63, 0x7a, 0xf8, 0x05, 0xc7, 0x39, 0x9c, 0x36, 0xd0, 0x93, 0xff, 0x0e, 0x00, 0x00, 0xff,
0xff, 0x84, 0x1a, 0xd6, 0xb6, 0x4e, 0x13, 0x00, 0x00,
}

View File

@ -0,0 +1,66 @@
syntax = "proto3";
package milvus.proto.indexnode;
option go_package = "github.com/milvus-io/milvus/internal/proto/indexnodepb";
import "common.proto";
// CreateJobRequest asks an IndexNode to build one index (identified by
// buildID) over the source files listed in data_paths.
message CreateJobRequest {
  // clusterID identifies the cluster this request belongs to.
  int64 clusterID = 1;
  // index_file_prefix is the prefix for produced index files — presumably an
  // object-storage path prefix; confirm against the IndexNode implementation.
  string index_file_prefix = 2;
  // buildID uniquely identifies this index-build task.
  int64 buildID = 3;
  // data_paths lists the input files the index is built from.
  repeated string data_paths = 4;
  // index_version is the version number of this index build.
  int64 index_version = 5;
  // indexID identifies the index definition this build belongs to.
  int64 indexID = 6;
  // index_name is the human-readable name of the index.
  string index_name = 7;
  // bucket_name is the storage bucket used for this build.
  string bucket_name = 8;
  // storage_access_key is the credential used to access the bucket.
  string storage_access_key = 9;
  // index_params are index-specific build parameters (e.g. index type/metric).
  repeated common.KeyValuePair index_params = 10;
  // type_params are field/type-level parameters (e.g. dimension).
  repeated common.KeyValuePair type_params = 11;
}
// QueryJobsRequest asks an IndexNode for the states of a batch of
// index-build jobs.
message QueryJobsRequest {
  // clusterID identifies the cluster issuing the query.
  int64 clusterID = 1;
  // buildIDs lists the build tasks whose states are requested.
  repeated int64 buildIDs = 2;
}
// IndexInfo reports the state of a single index-build job.
message IndexInfo {
  // buildID identifies the build task this info describes.
  int64 buildID = 1;
  // state is the build progress (Unissued/InProgress/Finished/Failed).
  common.IndexState state = 2;
  // index_files lists the files produced by the build, when available.
  repeated string index_files = 3;
}
// QueryJobsRespond is the reply to QueryJobsRequest.
// NOTE(review): "Respond" is likely a misspelling of "Response", but renaming
// would break the wire/package contract, so it is kept as-is.
message QueryJobsRespond {
  // status reports overall success/failure of the query RPC itself.
  common.Status status = 1;
  // clusterID echoes the cluster identifier from the request.
  int64 clusterID = 2;
  // index_infos holds one entry per queried build.
  repeated IndexInfo index_infos = 3;
}
// DropJobsRequest asks an IndexNode to drop (cancel/forget) a batch of
// index-build jobs.
message DropJobsRequest {
  // clusterID identifies the cluster issuing the drop.
  int64 clusterID = 1;
  // buildIDs lists the build tasks to drop.
  repeated int64 buildIDs = 2;
}
// JobInfo describes one running/known build job in a GetJobStats reply.
message JobInfo {
  // num_rows is the number of rows covered by the job.
  int64 num_rows = 1;
  // dim is the vector dimension of the indexed field.
  int64 dim = 2;
  // start_time is when the job started (units not specified here — likely a
  // timestamp/tick; confirm against the producer).
  int64 start_time = 3;
  // end_time is when the job ended, matching start_time's units.
  int64 end_time = 4;
  // index_params are the build parameters the job was started with.
  repeated common.KeyValuePair index_params = 5;
  // podID identifies the node/pod executing the job.
  int64 podID = 6;
}
// GetJobStatsRequest asks an IndexNode for its job statistics.
// Intentionally empty: the target node is addressed by the RPC itself.
message GetJobStatsRequest {
}
// GetJobStatsRespond is the reply to GetJobStatsRequest.
// NOTE(review): "Respond" is likely a misspelling of "Response" — kept for
// wire/package compatibility.
message GetJobStatsRespond {
  // status reports overall success/failure of the RPC.
  common.Status status = 1;
  // pendingJobs is the number of jobs queued but not yet running.
  int64 pendingJobs = 2;
  // job_infos describes the node's known jobs.
  repeated JobInfo job_infos = 3;
}
// IndexNode is the worker service that executes index-build jobs.
service IndexNode {
  // CreateJob submits one index-build task to this node.
  rpc CreateJob(CreateJobRequest) returns (common.Status) {}
  // QueryJobs returns the states of a batch of build tasks.
  rpc QueryJobs(QueryJobsRequest) returns (QueryJobsRespond) {}
  // DropJobs drops (cancels/forgets) a batch of build tasks.
  rpc DropJobs(DropJobsRequest) returns (common.Status) {}
  // GetJobStats reports this node's pending-job count and job details.
  rpc GetJobStats(GetJobStatsRequest) returns (GetJobStatsRespond) {}
}

View File

@ -0,0 +1,764 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: index_node.proto
package indexnodepb
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
commonpb "github.com/milvus-io/milvus/internal/proto/commonpb"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// CreateJobRequest is the generated Go binding of the
// milvus.proto.indexnode.CreateJobRequest message: a request to build one
// index (BuildID) over the files in DataPaths.
type CreateJobRequest struct {
	ClusterID        int64                    `protobuf:"varint,1,opt,name=clusterID,proto3" json:"clusterID,omitempty"`
	IndexFilePrefix  string                   `protobuf:"bytes,2,opt,name=index_file_prefix,json=indexFilePrefix,proto3" json:"index_file_prefix,omitempty"`
	BuildID          int64                    `protobuf:"varint,3,opt,name=buildID,proto3" json:"buildID,omitempty"`
	DataPaths        []string                 `protobuf:"bytes,4,rep,name=data_paths,json=dataPaths,proto3" json:"data_paths,omitempty"`
	IndexVersion     int64                    `protobuf:"varint,5,opt,name=index_version,json=indexVersion,proto3" json:"index_version,omitempty"`
	IndexID          int64                    `protobuf:"varint,6,opt,name=indexID,proto3" json:"indexID,omitempty"`
	IndexName        string                   `protobuf:"bytes,7,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
	BucketName       string                   `protobuf:"bytes,8,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
	StorageAccessKey string                   `protobuf:"bytes,9,opt,name=storage_access_key,json=storageAccessKey,proto3" json:"storage_access_key,omitempty"`
	IndexParams      []*commonpb.KeyValuePair `protobuf:"bytes,10,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
	TypeParams       []*commonpb.KeyValuePair `protobuf:"bytes,11,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *CreateJobRequest) Reset() { *m = CreateJobRequest{} }

// String renders the message in proto compact-text form.
func (m *CreateJobRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks CreateJobRequest as a protobuf message.
func (*CreateJobRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*CreateJobRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{0}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *CreateJobRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateJobRequest.Unmarshal(m, b)
}
func (m *CreateJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateJobRequest.Marshal(b, m, deterministic)
}
func (m *CreateJobRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateJobRequest.Merge(m, src)
}
func (m *CreateJobRequest) XXX_Size() int {
	return xxx_messageInfo_CreateJobRequest.Size(m)
}
func (m *CreateJobRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateJobRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateJobRequest proto.InternalMessageInfo

// GetClusterID returns ClusterID, or 0 for a nil receiver.
func (m *CreateJobRequest) GetClusterID() int64 {
	if m != nil {
		return m.ClusterID
	}
	return 0
}

// GetIndexFilePrefix returns IndexFilePrefix, or "" for a nil receiver.
func (m *CreateJobRequest) GetIndexFilePrefix() string {
	if m != nil {
		return m.IndexFilePrefix
	}
	return ""
}

// GetBuildID returns BuildID, or 0 for a nil receiver.
func (m *CreateJobRequest) GetBuildID() int64 {
	if m != nil {
		return m.BuildID
	}
	return 0
}

// GetDataPaths returns DataPaths, or nil for a nil receiver.
func (m *CreateJobRequest) GetDataPaths() []string {
	if m != nil {
		return m.DataPaths
	}
	return nil
}

// GetIndexVersion returns IndexVersion, or 0 for a nil receiver.
func (m *CreateJobRequest) GetIndexVersion() int64 {
	if m != nil {
		return m.IndexVersion
	}
	return 0
}

// GetIndexID returns IndexID, or 0 for a nil receiver.
func (m *CreateJobRequest) GetIndexID() int64 {
	if m != nil {
		return m.IndexID
	}
	return 0
}

// GetIndexName returns IndexName, or "" for a nil receiver.
func (m *CreateJobRequest) GetIndexName() string {
	if m != nil {
		return m.IndexName
	}
	return ""
}

// GetBucketName returns BucketName, or "" for a nil receiver.
func (m *CreateJobRequest) GetBucketName() string {
	if m != nil {
		return m.BucketName
	}
	return ""
}

// GetStorageAccessKey returns StorageAccessKey, or "" for a nil receiver.
func (m *CreateJobRequest) GetStorageAccessKey() string {
	if m != nil {
		return m.StorageAccessKey
	}
	return ""
}

// GetIndexParams returns IndexParams, or nil for a nil receiver.
func (m *CreateJobRequest) GetIndexParams() []*commonpb.KeyValuePair {
	if m != nil {
		return m.IndexParams
	}
	return nil
}

// GetTypeParams returns TypeParams, or nil for a nil receiver.
func (m *CreateJobRequest) GetTypeParams() []*commonpb.KeyValuePair {
	if m != nil {
		return m.TypeParams
	}
	return nil
}
// QueryJobsRequest is the generated Go binding of the
// milvus.proto.indexnode.QueryJobsRequest message: a batched query for the
// states of the given build IDs.
type QueryJobsRequest struct {
	ClusterID int64   `protobuf:"varint,1,opt,name=clusterID,proto3" json:"clusterID,omitempty"`
	BuildIDs  []int64 `protobuf:"varint,2,rep,packed,name=buildIDs,proto3" json:"buildIDs,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *QueryJobsRequest) Reset() { *m = QueryJobsRequest{} }

// String renders the message in proto compact-text form.
func (m *QueryJobsRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks QueryJobsRequest as a protobuf message.
func (*QueryJobsRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*QueryJobsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{1}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *QueryJobsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_QueryJobsRequest.Unmarshal(m, b)
}
func (m *QueryJobsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_QueryJobsRequest.Marshal(b, m, deterministic)
}
func (m *QueryJobsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryJobsRequest.Merge(m, src)
}
func (m *QueryJobsRequest) XXX_Size() int {
	return xxx_messageInfo_QueryJobsRequest.Size(m)
}
func (m *QueryJobsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryJobsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_QueryJobsRequest proto.InternalMessageInfo

// GetClusterID returns ClusterID, or 0 for a nil receiver.
func (m *QueryJobsRequest) GetClusterID() int64 {
	if m != nil {
		return m.ClusterID
	}
	return 0
}

// GetBuildIDs returns BuildIDs, or nil for a nil receiver.
func (m *QueryJobsRequest) GetBuildIDs() []int64 {
	if m != nil {
		return m.BuildIDs
	}
	return nil
}
// IndexInfo is the generated Go binding of the
// milvus.proto.indexnode.IndexInfo message: the state of one build job.
type IndexInfo struct {
	BuildID    int64               `protobuf:"varint,1,opt,name=buildID,proto3" json:"buildID,omitempty"`
	State      commonpb.IndexState `protobuf:"varint,2,opt,name=state,proto3,enum=milvus.proto.common.IndexState" json:"state,omitempty"`
	IndexFiles []string            `protobuf:"bytes,3,rep,name=index_files,json=indexFiles,proto3" json:"index_files,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *IndexInfo) Reset() { *m = IndexInfo{} }

// String renders the message in proto compact-text form.
func (m *IndexInfo) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks IndexInfo as a protobuf message.
func (*IndexInfo) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*IndexInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{2}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *IndexInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_IndexInfo.Unmarshal(m, b)
}
func (m *IndexInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_IndexInfo.Marshal(b, m, deterministic)
}
func (m *IndexInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IndexInfo.Merge(m, src)
}
func (m *IndexInfo) XXX_Size() int {
	return xxx_messageInfo_IndexInfo.Size(m)
}
func (m *IndexInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_IndexInfo.DiscardUnknown(m)
}

var xxx_messageInfo_IndexInfo proto.InternalMessageInfo

// GetBuildID returns BuildID, or 0 for a nil receiver.
func (m *IndexInfo) GetBuildID() int64 {
	if m != nil {
		return m.BuildID
	}
	return 0
}

// GetState returns State, or IndexStateNone for a nil receiver.
func (m *IndexInfo) GetState() commonpb.IndexState {
	if m != nil {
		return m.State
	}
	return commonpb.IndexState_IndexStateNone
}

// GetIndexFiles returns IndexFiles, or nil for a nil receiver.
func (m *IndexInfo) GetIndexFiles() []string {
	if m != nil {
		return m.IndexFiles
	}
	return nil
}
// QueryJobsRespond is the generated Go binding of the
// milvus.proto.indexnode.QueryJobsRespond message (the QueryJobs reply).
// Note the "Respond" spelling originates in the .proto and is part of the
// generated API surface.
type QueryJobsRespond struct {
	Status     *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
	ClusterID  int64            `protobuf:"varint,2,opt,name=clusterID,proto3" json:"clusterID,omitempty"`
	IndexInfos []*IndexInfo     `protobuf:"bytes,3,rep,name=index_infos,json=indexInfos,proto3" json:"index_infos,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *QueryJobsRespond) Reset() { *m = QueryJobsRespond{} }

// String renders the message in proto compact-text form.
func (m *QueryJobsRespond) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks QueryJobsRespond as a protobuf message.
func (*QueryJobsRespond) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*QueryJobsRespond) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{3}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *QueryJobsRespond) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_QueryJobsRespond.Unmarshal(m, b)
}
func (m *QueryJobsRespond) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_QueryJobsRespond.Marshal(b, m, deterministic)
}
func (m *QueryJobsRespond) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryJobsRespond.Merge(m, src)
}
func (m *QueryJobsRespond) XXX_Size() int {
	return xxx_messageInfo_QueryJobsRespond.Size(m)
}
func (m *QueryJobsRespond) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryJobsRespond.DiscardUnknown(m)
}

var xxx_messageInfo_QueryJobsRespond proto.InternalMessageInfo

// GetStatus returns Status, or nil for a nil receiver.
func (m *QueryJobsRespond) GetStatus() *commonpb.Status {
	if m != nil {
		return m.Status
	}
	return nil
}

// GetClusterID returns ClusterID, or 0 for a nil receiver.
func (m *QueryJobsRespond) GetClusterID() int64 {
	if m != nil {
		return m.ClusterID
	}
	return 0
}

// GetIndexInfos returns IndexInfos, or nil for a nil receiver.
func (m *QueryJobsRespond) GetIndexInfos() []*IndexInfo {
	if m != nil {
		return m.IndexInfos
	}
	return nil
}
// DropJobsRequest is the generated Go binding of the
// milvus.proto.indexnode.DropJobsRequest message: a batched request to drop
// the given build jobs.
type DropJobsRequest struct {
	ClusterID int64   `protobuf:"varint,1,opt,name=clusterID,proto3" json:"clusterID,omitempty"`
	BuildIDs  []int64 `protobuf:"varint,2,rep,packed,name=buildIDs,proto3" json:"buildIDs,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *DropJobsRequest) Reset() { *m = DropJobsRequest{} }

// String renders the message in proto compact-text form.
func (m *DropJobsRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks DropJobsRequest as a protobuf message.
func (*DropJobsRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*DropJobsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{4}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *DropJobsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DropJobsRequest.Unmarshal(m, b)
}
func (m *DropJobsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DropJobsRequest.Marshal(b, m, deterministic)
}
func (m *DropJobsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DropJobsRequest.Merge(m, src)
}
func (m *DropJobsRequest) XXX_Size() int {
	return xxx_messageInfo_DropJobsRequest.Size(m)
}
func (m *DropJobsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DropJobsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_DropJobsRequest proto.InternalMessageInfo

// GetClusterID returns ClusterID, or 0 for a nil receiver.
func (m *DropJobsRequest) GetClusterID() int64 {
	if m != nil {
		return m.ClusterID
	}
	return 0
}

// GetBuildIDs returns BuildIDs, or nil for a nil receiver.
func (m *DropJobsRequest) GetBuildIDs() []int64 {
	if m != nil {
		return m.BuildIDs
	}
	return nil
}
// JobInfo is the generated Go binding of the
// milvus.proto.indexnode.JobInfo message: details of one build job as
// reported by GetJobStats.
type JobInfo struct {
	NumRows   int64 `protobuf:"varint,1,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"`
	Dim       int64 `protobuf:"varint,2,opt,name=dim,proto3" json:"dim,omitempty"`
	StartTime int64 `protobuf:"varint,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	EndTime   int64 `protobuf:"varint,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// Note: index_params is field 5 here (it is field 10 in CreateJobRequest).
	IndexParams []*commonpb.KeyValuePair `protobuf:"bytes,5,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
	PodID       int64                    `protobuf:"varint,6,opt,name=podID,proto3" json:"podID,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *JobInfo) Reset() { *m = JobInfo{} }

// String renders the message in proto compact-text form.
func (m *JobInfo) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks JobInfo as a protobuf message.
func (*JobInfo) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*JobInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{5}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *JobInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_JobInfo.Unmarshal(m, b)
}
func (m *JobInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_JobInfo.Marshal(b, m, deterministic)
}
func (m *JobInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_JobInfo.Merge(m, src)
}
func (m *JobInfo) XXX_Size() int {
	return xxx_messageInfo_JobInfo.Size(m)
}
func (m *JobInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_JobInfo.DiscardUnknown(m)
}

var xxx_messageInfo_JobInfo proto.InternalMessageInfo

// GetNumRows returns NumRows, or 0 for a nil receiver.
func (m *JobInfo) GetNumRows() int64 {
	if m != nil {
		return m.NumRows
	}
	return 0
}

// GetDim returns Dim, or 0 for a nil receiver.
func (m *JobInfo) GetDim() int64 {
	if m != nil {
		return m.Dim
	}
	return 0
}

// GetStartTime returns StartTime, or 0 for a nil receiver.
func (m *JobInfo) GetStartTime() int64 {
	if m != nil {
		return m.StartTime
	}
	return 0
}

// GetEndTime returns EndTime, or 0 for a nil receiver.
func (m *JobInfo) GetEndTime() int64 {
	if m != nil {
		return m.EndTime
	}
	return 0
}

// GetIndexParams returns IndexParams, or nil for a nil receiver.
func (m *JobInfo) GetIndexParams() []*commonpb.KeyValuePair {
	if m != nil {
		return m.IndexParams
	}
	return nil
}

// GetPodID returns PodID, or 0 for a nil receiver.
func (m *JobInfo) GetPodID() int64 {
	if m != nil {
		return m.PodID
	}
	return 0
}
// GetJobStatsRequest is the generated Go binding of the (empty)
// milvus.proto.indexnode.GetJobStatsRequest message.
type GetJobStatsRequest struct {
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *GetJobStatsRequest) Reset() { *m = GetJobStatsRequest{} }

// String renders the message in proto compact-text form.
func (m *GetJobStatsRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks GetJobStatsRequest as a protobuf message.
func (*GetJobStatsRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*GetJobStatsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{6}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *GetJobStatsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetJobStatsRequest.Unmarshal(m, b)
}
func (m *GetJobStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetJobStatsRequest.Marshal(b, m, deterministic)
}
func (m *GetJobStatsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetJobStatsRequest.Merge(m, src)
}
func (m *GetJobStatsRequest) XXX_Size() int {
	return xxx_messageInfo_GetJobStatsRequest.Size(m)
}
func (m *GetJobStatsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetJobStatsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetJobStatsRequest proto.InternalMessageInfo
// GetJobStatsRespond is the generated Go binding of the
// milvus.proto.indexnode.GetJobStatsRespond message (the GetJobStats reply).
// Note the "Respond" spelling originates in the .proto and is part of the
// generated API surface.
type GetJobStatsRespond struct {
	Status      *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
	PendingJobs int64            `protobuf:"varint,2,opt,name=pendingJobs,proto3" json:"pendingJobs,omitempty"`
	JobInfos    []*JobInfo       `protobuf:"bytes,3,rep,name=job_infos,json=jobInfos,proto3" json:"job_infos,omitempty"`
	// XXX_* fields are protoc-gen-go bookkeeping (unknown fields, size cache).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *GetJobStatsRespond) Reset() { *m = GetJobStatsRespond{} }

// String renders the message in proto compact-text form.
func (m *GetJobStatsRespond) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks GetJobStatsRespond as a protobuf message.
func (*GetJobStatsRespond) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*GetJobStatsRespond) Descriptor() ([]byte, []int) {
	return fileDescriptor_7c97d3c987a256af, []int{7}
}

// The XXX_* methods delegate wire (un)marshalling to the table-driven runtime.
func (m *GetJobStatsRespond) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetJobStatsRespond.Unmarshal(m, b)
}
func (m *GetJobStatsRespond) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetJobStatsRespond.Marshal(b, m, deterministic)
}
func (m *GetJobStatsRespond) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetJobStatsRespond.Merge(m, src)
}
func (m *GetJobStatsRespond) XXX_Size() int {
	return xxx_messageInfo_GetJobStatsRespond.Size(m)
}
func (m *GetJobStatsRespond) XXX_DiscardUnknown() {
	xxx_messageInfo_GetJobStatsRespond.DiscardUnknown(m)
}

var xxx_messageInfo_GetJobStatsRespond proto.InternalMessageInfo

// GetStatus returns Status, or nil for a nil receiver.
func (m *GetJobStatsRespond) GetStatus() *commonpb.Status {
	if m != nil {
		return m.Status
	}
	return nil
}

// GetPendingJobs returns PendingJobs, or 0 for a nil receiver.
func (m *GetJobStatsRespond) GetPendingJobs() int64 {
	if m != nil {
		return m.PendingJobs
	}
	return 0
}

// GetJobInfos returns JobInfos, or nil for a nil receiver.
func (m *GetJobStatsRespond) GetJobInfos() []*JobInfo {
	if m != nil {
		return m.JobInfos
	}
	return nil
}
// init registers every generated message type with the proto runtime under
// its fully-qualified protobuf name.
func init() {
	proto.RegisterType((*CreateJobRequest)(nil), "milvus.proto.indexnode.CreateJobRequest")
	proto.RegisterType((*QueryJobsRequest)(nil), "milvus.proto.indexnode.QueryJobsRequest")
	proto.RegisterType((*IndexInfo)(nil), "milvus.proto.indexnode.IndexInfo")
	proto.RegisterType((*QueryJobsRespond)(nil), "milvus.proto.indexnode.QueryJobsRespond")
	proto.RegisterType((*DropJobsRequest)(nil), "milvus.proto.indexnode.DropJobsRequest")
	proto.RegisterType((*JobInfo)(nil), "milvus.proto.indexnode.JobInfo")
	proto.RegisterType((*GetJobStatsRequest)(nil), "milvus.proto.indexnode.GetJobStatsRequest")
	proto.RegisterType((*GetJobStatsRespond)(nil), "milvus.proto.indexnode.GetJobStatsRespond")
}
// init registers the compressed file descriptor for index_node.proto with
// the proto runtime.
func init() { proto.RegisterFile("index_node.proto", fileDescriptor_7c97d3c987a256af) }

// fileDescriptor_7c97d3c987a256af is the gzipped FileDescriptorProto for
// index_node.proto. Generated data — do not edit by hand.
var fileDescriptor_7c97d3c987a256af = []byte{
	// 719 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xc1, 0x6e, 0xdb, 0x46,
	0x10, 0xb5, 0x4c, 0xcb, 0x16, 0x87, 0x6e, 0xad, 0x2e, 0x8c, 0x82, 0x55, 0x5b, 0x58, 0x65, 0x0f,
	0x15, 0x8c, 0x56, 0x02, 0x64, 0xb4, 0xe8, 0xa1, 0x97, 0xba, 0x42, 0x03, 0xc9, 0x81, 0xa1, 0xd0,
	0x86, 0x0f, 0xb9, 0x10, 0x4b, 0x71, 0x24, 0xaf, 0x2d, 0x72, 0x19, 0xee, 0xd2, 0xb6, 0x0e, 0x39,
	0xe4, 0x73, 0xf2, 0x2f, 0xfe, 0xa8, 0x60, 0x77, 0x29, 0xda, 0x92, 0xad, 0x44, 0x48, 0x72, 0xdb,
	0x79, 0x33, 0xf3, 0x38, 0xfb, 0xde, 0x2c, 0xa1, 0xce, 0x92, 0x08, 0xef, 0x82, 0x84, 0x47, 0xd8,
	0x4e, 0x33, 0x2e, 0x39, 0xf9, 0x3e, 0x66, 0xd3, 0x9b, 0x5c, 0x98, 0xa8, 0xad, 0xd3, 0x2a, 0xdb,
	0xd8, 0x1d, 0xf1, 0x38, 0xe6, 0x89, 0xc1, 0xbd, 0x7b, 0x0b, 0xea, 0xff, 0x65, 0x48, 0x25, 0x0e,
	0x78, 0xe8, 0xe3, 0x9b, 0x1c, 0x85, 0x24, 0x3f, 0x81, 0x3d, 0x9a, 0xe6, 0x42, 0x62, 0xd6, 0xef,
	0xb9, 0x95, 0x66, 0xa5, 0x65, 0xf9, 0x0f, 0x00, 0x39, 0x84, 0xef, 0xcc, 0xc7, 0xc6, 0x6c, 0x8a,
	0x41, 0x9a, 0xe1, 0x98, 0xdd, 0xb9, 0x9b, 0xcd, 0x4a, 0xcb, 0xf6, 0xf7, 0x74, 0xe2, 0x7f, 0x36,
	0xc5, 0xa1, 0x86, 0x89, 0x0b, 0x3b, 0x61, 0xce, 0xa6, 0x51, 0xbf, 0xe7, 0x5a, 0x9a, 0x67, 0x1e,
	0x92, 0x9f, 0x01, 0x22, 0x2a, 0x69, 0x90, 0x52, 0x79, 0x29, 0xdc, 0xad, 0xa6, 0xd5, 0xb2, 0x7d,
	0x5b, 0x21, 0x43, 0x05, 0x90, 0x5f, 0xe1, 0x1b, 0xf3, 0x91, 0x1b, 0xcc, 0x04, 0xe3, 0x89, 0x5b,
	0xd5, 0xed, 0xbb, 0x1a, 0xbc, 0x30, 0x98, 0x62, 0xd7, 0x71, 0xbf, 0xe7, 0x6e, 0x1b, 0xf6, 0x22,
	0x54, 0xec, 0x85, 0x20, 0x34, 0x46, 0x77, 0x47, 0x0f, 0x67, 0x6b, 0xe4, 0x94, 0xc6, 0x48, 0x0e,
	0xc0, 0x09, 0xf3, 0xd1, 0x35, 0x4a, 0x93, 0xaf, 0xe9, 0x3c, 0x18, 0x48, 0x17, 0xfc, 0x0e, 0x44,
	0x48, 0x9e, 0xd1, 0x09, 0x06, 0x74, 0x34, 0x42, 0x21, 0x82, 0x6b, 0x9c, 0xb9, 0xb6, 0xae, 0xab,
	0x17, 0x99, 0x7f, 0x75, 0xe2, 0x04, 0x67, 0xa4, 0x07, 0x66, 0xae, 0x20, 0xa5, 0x19, 0x8d, 0x85,
	0x0b, 0x4d, 0xab, 0xe5, 0x74, 0x7f, 0x69, 0x2f, 0x38, 0x50, 0xc8, 0x7e, 0x82, 0xb3, 0x0b, 0x3a,
	0xcd, 0x71, 0x48, 0x59, 0xe6, 0x3b, 0xba, 0x6d, 0xa8, 0xbb, 0xc8, 0x31, 0x38, 0x72, 0x96, 0xe2,
	0x9c, 0xc4, 0x59, 0x97, 0x04, 0x54, 0x97, 0xe1, 0xf0, 0x5e, 0x42, 0xfd, 0x55, 0x8e, 0xd9, 0x6c,
	0xc0, 0x43, 0xb1, 0x9e, 0x9b, 0x0d, 0xa8, 0x15, 0x96, 0x08, 0x77, 0xb3, 0x69, 0xb5, 0x2c, 0xbf,
	0x8c, 0xbd, 0xb7, 0x60, 0xf7, 0xb5, 0xa0, 0xc9, 0x98, 0x3f, 0xb6, 0xb2, 0xb2, 0x68, 0xe5, 0x9f,
	0x50, 0x15, 0x92, 0x4a, 0xd4, 0x4b, 0xf0, 0x6d, 0xf7, 0xe0, 0xd9, 0x91, 0x35, 0xd1, 0x99, 0x2a,
	0xf3, 0x4d, 0xb5, 0x32, 0xe1, 0x61, 0x8f, 0x84, 0x6b, 0xe9, 0x15, 0x80, 0x72, 0x83, 0x84, 0xf7,
	0xbe, 0xb2, 0x70, 0x1b, 0x91, 0xf2, 0x24, 0x22, 0x47, 0xb0, 0xad, 0xda, 0x73, 0xa1, 0xa7, 0x70,
	0xba, 0x3f, 0x3e, 0xfb, 0xb5, 0x33, 0x5d, 0xe2, 0x17, 0xa5, 0x8b, 0x12, 0x6c, 0x2e, 0x4b, 0x70,
	0x3c, 0x1f, 0x84, 0x25, 0x63, 0x6e, 0x06, 0x79, 0x22, 0x7c, 0xf9, 0x7e, 0xda, 0xa5, 0x22, 0xc5,
	0xac, 0xea, 0x28, 0xbc, 0x13, 0xd8, 0xeb, 0x65, 0x3c, 0xfd, 0x3a, 0xba, 0xdf, 0x57, 0x60, 0x67,
	0xc0, 0x43, 0x2d, 0xfb, 0x0f, 0x50, 0x4b, 0xf2, 0x38, 0xc8, 0xf8, 0xad, 0x98, 0xeb, 0x9e, 0xe4,
	0xb1, 0xcf, 0x6f, 0x05, 0xa9, 0x83, 0x15, 0xb1, 0xb8, 0xb8, 0x8f, 0x3a, 0xaa, 0xb5, 0x17, 0x92,
	0x66, 0x32, 0x90, 0x2c, 0xc6, 0xe2, 0xc5, 0xd9, 0x1a, 0x39, 0x67, 0x31, 0x2a, 0x2e, 0x4c, 0x22,
	0x93, 0xdc, 0x32, 0x5c, 0x98, 0x44, 0x3a, 0xb5, 0xbc, 0xc2, 0xd5, 0xcf, 0x5a, 0xe1, 0x7d, 0xa8,
	0xa6, 0x3c, 0x2a, 0x9f, 0xa3, 0x09, 0xbc, 0x7d, 0x20, 0x2f, 0x50, 0x0e, 0x78, 0xa8, 0x5c, 0x99,
	0xcb, 0xa3, 0xdc, 0x5d, 0x84, 0xbf, 0xc0, 0xdf, 0x26, 0x38, 0x29, 0x26, 0x11, 0x4b, 0x26, 0xca,
	0x80, 0x42, 0x91, 0xc7, 0x10, 0xf9, 0x07, 0xec, 0x2b, 0x1e, 0x2e, 0x38, 0x7c, 0xb0, 0xca, 0xe1,
	0x42, 0x7a, 0xbf, 0x76, 0x65, 0x0e, 0xa2, 0xfb, 0xce, 0x2a, 0x5e, 0xc2, 0x29, 0x8f, 0x90, 0x9c,
	0x83, 0x5d, 0xfe, 0x32, 0x49, 0x6b, 0x15, 0xcb, 0xf2, 0x5f, 0xb5, 0xf1, 0xb1, 0x9b, 0x78, 0x1b,
	0x84, 0x82, 0x5d, 0x2e, 0xfb, 0x6a, 0xd6, 0xe5, 0xd7, 0xdd, 0x58, 0xa7, 0x52, 0x2b, 0xeb, 0x6d,
	0x10, 0x1f, 0x6a, 0xf3, 0x25, 0x25, 0xbf, 0xad, 0xea, 0x5b, 0x5a, 0xe3, 0x4f, 0x8d, 0x3d, 0x01,
	0xe7, 0x91, 0x8b, 0xe4, 0x70, 0x15, 0xed, 0xd3, 0x0d, 0x68, 0xac, 0x57, 0x5b, 0x0c, 0x7f, 0xfc,
	0xf7, 0xeb, 0xbf, 0x26, 0x4c, 0x5e, 0xe6, 0xa1, 0x1a, 0xa1, 0x63, 0x3a, 0xff, 0x60, 0xbc, 0x38,
	0x75, 0x58, 0x22, 0x31, 0x4b, 0xe8, 0xb4, 0xa3, 0xc9, 0x3a, 0x25, 0x59, 0x1a, 0x86, 0xdb, 0x1a,
	0x3a, 0xfa, 0x10, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x62, 0xeb, 0xe7, 0x24, 0x07, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// IndexNodeClient is the client API for IndexNode service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type IndexNodeClient interface {
	// CreateJob submits one index-build task to the node.
	CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// QueryJobs fetches the states of a batch of build tasks.
	QueryJobs(ctx context.Context, in *QueryJobsRequest, opts ...grpc.CallOption) (*QueryJobsRespond, error)
	// DropJobs drops a batch of build tasks.
	DropJobs(ctx context.Context, in *DropJobsRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
	// GetJobStats reports the node's pending-job count and job details.
	GetJobStats(ctx context.Context, in *GetJobStatsRequest, opts ...grpc.CallOption) (*GetJobStatsRespond, error)
}

// indexNodeClient is the concrete IndexNodeClient backed by a grpc connection.
type indexNodeClient struct {
	cc *grpc.ClientConn
}

// NewIndexNodeClient wraps an existing grpc connection in an IndexNodeClient.
func NewIndexNodeClient(cc *grpc.ClientConn) IndexNodeClient {
	return &indexNodeClient{cc}
}
// CreateJob invokes the IndexNode/CreateJob unary RPC.
func (c *indexNodeClient) CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
	out := new(commonpb.Status)
	err := c.cc.Invoke(ctx, "/milvus.proto.indexnode.IndexNode/CreateJob", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// QueryJobs invokes the IndexNode/QueryJobs unary RPC.
func (c *indexNodeClient) QueryJobs(ctx context.Context, in *QueryJobsRequest, opts ...grpc.CallOption) (*QueryJobsRespond, error) {
	out := new(QueryJobsRespond)
	err := c.cc.Invoke(ctx, "/milvus.proto.indexnode.IndexNode/QueryJobs", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// DropJobs invokes the IndexNode/DropJobs unary RPC.
func (c *indexNodeClient) DropJobs(ctx context.Context, in *DropJobsRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
	out := new(commonpb.Status)
	err := c.cc.Invoke(ctx, "/milvus.proto.indexnode.IndexNode/DropJobs", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// GetJobStats invokes the IndexNode/GetJobStats unary RPC.
func (c *indexNodeClient) GetJobStats(ctx context.Context, in *GetJobStatsRequest, opts ...grpc.CallOption) (*GetJobStatsRespond, error) {
	out := new(GetJobStatsRespond)
	err := c.cc.Invoke(ctx, "/milvus.proto.indexnode.IndexNode/GetJobStats", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// IndexNodeServer is the server API for IndexNode service.
// Implementations handle the four unary RPCs declared in index_node.proto.
type IndexNodeServer interface {
	// CreateJob handles a request to build one index.
	CreateJob(context.Context, *CreateJobRequest) (*commonpb.Status, error)
	// QueryJobs returns the states of a batch of build tasks.
	QueryJobs(context.Context, *QueryJobsRequest) (*QueryJobsRespond, error)
	// DropJobs drops a batch of build tasks.
	DropJobs(context.Context, *DropJobsRequest) (*commonpb.Status, error)
	// GetJobStats reports pending-job count and job details.
	GetJobStats(context.Context, *GetJobStatsRequest) (*GetJobStatsRespond, error)
}
// UnimplementedIndexNodeServer can be embedded to have forward compatible implementations.
// Every method returns a gRPC Unimplemented error.
type UnimplementedIndexNodeServer struct {
}

// CreateJob returns codes.Unimplemented.
func (*UnimplementedIndexNodeServer) CreateJob(ctx context.Context, req *CreateJobRequest) (*commonpb.Status, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreateJob not implemented")
}

// QueryJobs returns codes.Unimplemented.
func (*UnimplementedIndexNodeServer) QueryJobs(ctx context.Context, req *QueryJobsRequest) (*QueryJobsRespond, error) {
	return nil, status.Errorf(codes.Unimplemented, "method QueryJobs not implemented")
}

// DropJobs returns codes.Unimplemented.
func (*UnimplementedIndexNodeServer) DropJobs(ctx context.Context, req *DropJobsRequest) (*commonpb.Status, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DropJobs not implemented")
}

// GetJobStats returns codes.Unimplemented.
func (*UnimplementedIndexNodeServer) GetJobStats(ctx context.Context, req *GetJobStatsRequest) (*GetJobStatsRespond, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetJobStats not implemented")
}
// RegisterIndexNodeServer registers srv's handlers with the grpc server s
// under the IndexNode service description.
func RegisterIndexNodeServer(s *grpc.Server, srv IndexNodeServer) {
	s.RegisterService(&_IndexNode_serviceDesc, srv)
}
func _IndexNode_CreateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateJobRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(IndexNodeServer).CreateJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.indexnode.IndexNode/CreateJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(IndexNodeServer).CreateJob(ctx, req.(*CreateJobRequest))
}
return interceptor(ctx, in, info, handler)
}
func _IndexNode_QueryJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryJobsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(IndexNodeServer).QueryJobs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.indexnode.IndexNode/QueryJobs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(IndexNodeServer).QueryJobs(ctx, req.(*QueryJobsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _IndexNode_DropJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DropJobsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(IndexNodeServer).DropJobs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.indexnode.IndexNode/DropJobs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(IndexNodeServer).DropJobs(ctx, req.(*DropJobsRequest))
}
return interceptor(ctx, in, info, handler)
}
// _IndexNode_GetJobStats_Handler decodes an incoming GetJobStats request and
// dispatches it to the IndexNodeServer implementation, routing the call
// through the unary interceptor when one is configured.
func _IndexNode_GetJobStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(GetJobStatsRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(IndexNodeServer).GetJobStats(ctx, r.(*GetJobStatsRequest))
	}
	if interceptor == nil {
		return invoke(ctx, req)
	}
	return interceptor(ctx, req, &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/milvus.proto.indexnode.IndexNode/GetJobStats",
	}, invoke)
}
// _IndexNode_serviceDesc is the grpc.ServiceDesc for the IndexNode service.
// It maps each unary RPC name to its generated handler; the service exposes
// no streaming methods. Passed to s.RegisterService by RegisterIndexNodeServer.
var _IndexNode_serviceDesc = grpc.ServiceDesc{
	ServiceName: "milvus.proto.indexnode.IndexNode",
	HandlerType: (*IndexNodeServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "CreateJob",
			Handler:    _IndexNode_CreateJob_Handler,
		},
		{
			MethodName: "QueryJobs",
			Handler:    _IndexNode_QueryJobs_Handler,
		},
		{
			MethodName: "DropJobs",
			Handler:    _IndexNode_DropJobs_Handler,
		},
		{
			MethodName: "GetJobStats",
			Handler:    _IndexNode_GetJobStats_Handler,
		},
	},
	// No streaming RPCs are defined for IndexNode.
	Streams:  []grpc.StreamDesc{},
	Metadata: "index_node.proto",
}

View File

@ -19,10 +19,9 @@ package types
import (
"context"
clientv3 "go.etcd.io/etcd/client/v3"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/indexnodepb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
@ -30,6 +29,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/util/sessionutil"
clientv3 "go.etcd.io/etcd/client/v3"
)
// TimeTickProvider is the interface all services implement
@ -307,10 +307,16 @@ type IndexNode interface {
Component
TimeTickProvider
// CreateIndex receives request from IndexCoordinator to build an index.
// Index building is asynchronous, so when an index building request comes, IndexNode records the task and returns.
CreateIndex(ctx context.Context, req *indexpb.CreateIndexRequest) (*commonpb.Status, error)
GetTaskSlots(ctx context.Context, req *indexpb.GetTaskSlotsRequest) (*indexpb.GetTaskSlotsResponse, error)
// CreateJob receives an index building job from indexcoord. Note that index building is asynchronous: the task
// is recorded in indexnode and then the request returns.
CreateJob(context.Context, *indexnodepb.CreateJobRequest) (*commonpb.Status, error)
// QueryJobs returns the states of the index building jobs specified by BuildIDs. An index building task has
// four possible states: Unissued, InProgress, Finished, and Failed.
QueryJobs(context.Context, *indexnodepb.QueryJobsRequest) (*indexnodepb.QueryJobsRespond, error)
// DropJobs cancels the index building jobs specified by BuildIDs. Note that a dropped task may have already finished.
DropJobs(context.Context, *indexnodepb.DropJobsRequest) (*commonpb.Status, error)
// GetJobStats returns metrics of indexnode, including available job queue info, available task slots and finished job infos.
GetJobStats(context.Context, *indexnodepb.GetJobStatsRequest) (*indexnodepb.GetJobStatsRespond, error)
// GetMetrics gets the metrics about IndexNode.
GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error)

View File

@ -167,9 +167,11 @@ func SetupLogger(cfg *log.Config) {
})
}
type logKey int
type logKey struct{}
const logCtxKey logKey = iota
var (
logCtxKey = logKey{}
)
// WithField adds given kv field to the logger in ctx
func WithField(ctx context.Context, key string, value string) context.Context {
@ -181,6 +183,14 @@ func WithField(ctx context.Context, key string, value string) context.Context {
return context.WithValue(ctx, logCtxKey, logger.With(zap.String(key, value)))
}
func WithFields(ctx context.Context, fields ...zap.Field) context.Context {
logger := log.L()
if ctxLogger, ok := ctx.Value(logCtxKey).(*zap.Logger); ok {
logger = ctxLogger
}
return context.WithValue(ctx, logCtxKey, logger.With(fields...))
}
// WithReqID adds given reqID field to the logger in ctx
func WithReqID(ctx context.Context, reqID int64) context.Context {
logger := log.L()

View File

@ -51,7 +51,7 @@ mkdir -p indexpb
mkdir -p datapb
mkdir -p querypb
mkdir -p planpb
mkdir -p indexnodepb
mkdir -p querypbv2
${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grpc,paths=source_relative:./commonpb common.proto
@ -69,6 +69,7 @@ ${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grp
${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grpc,paths=source_relative:./querypb query_coord.proto
${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grpc,paths=source_relative:./planpb plan.proto
${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grpc,paths=source_relative:./segcorepb segcore.proto
${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grpc,paths=source_relative:./indexnodepb index_node.proto
${protoc} --proto_path="${GOOGLE_PROTO_DIR}" --proto_path=. --go_out=plugins=grpc,paths=source_relative:./querypbv2 query_coordv2.proto
popd