diff --git a/docs/developer_guides/chap06_root_coordinator.md b/docs/developer_guides/chap06_root_coordinator.md index d759ef23d2..48e22f258b 100644 --- a/docs/developer_guides/chap06_root_coordinator.md +++ b/docs/developer_guides/chap06_root_coordinator.md @@ -627,6 +627,7 @@ _proxyMetaBlob_, _collectionInfoBlob_, _partitionInfoBlob_, _IndexInfoBlob_, _se type metaTable struct { txn kv.TxnKV // client of a reliable txnkv service, i.e. etcd client snapshot kv.SnapShotKV // client of a reliable snapshotkv service, i.e. etcd client + proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta collID2Meta map[typeutil.UniqueID]pb.CollectionInfo // collection_id -> meta collName2ID map[string]typeutil.UniqueID // collection name to collection id collAlias2ID map[string]typeutil.UniqueID // collection alias to collection id @@ -640,6 +641,7 @@ type metaTable struct { func NewMetaTable(kv kv.SnapShotKV) (*metaTable, error) +func (mt *metaTable) AddProxy(po *pb.ProxyMeta) (typeutil.Timestamp, error) func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, part *pb.PartitionInfo, idx []*pb.IndexInfo, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error) func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.Timestamp, error) func (mt *metaTable) HasCollection(collID typeutil.UniqueID, ts typeutil.Timestamp) bool @@ -690,6 +692,7 @@ type timetickSync struct { func newTimeTickSync(core *Core) *timetickSync func (t *timetickSync) UpdateTimeTick(in *internalpb.ChannelTimeTickMsg) error +func (t *timetickSync) AddProxy(sess *sessionutil.Session) func (t *timetickSync) DelProxy(sess *sessionutil.Session) func (t *timetickSync) GetProxy(sess []*sessionutil.Session) func (t *timetickSync) StartWatch() diff --git a/go.mod b/go.mod index 4dbd873a6a..4e9f3393bf 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/gin-gonic/gin v1.7.7 github.com/go-basic/ipv4 v1.0.0 - github.com/go-sql-driver/mysql v1.6.0 // indirect github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.4.1 // indirect github.com/golang/mock v1.5.0 diff --git a/go.sum b/go.sum index 28accf2a81..c1125f271b 100644 --- a/go.sum +++ b/go.sum @@ -235,8 +235,6 @@ github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7a github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/goccy/go-json v0.7.10 h1:ulhbuNe1JqE68nMRXXTJRrUu0uhouf0VevLINxQq4Ec= github.com/goccy/go-json v0.7.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= diff --git a/internal/distributed/rootcoord/service_test.go b/internal/distributed/rootcoord/service_test.go index e8413a0949..0ed0094747 100644 --- a/internal/distributed/rootcoord/service_test.go +++ b/internal/distributed/rootcoord/service_test.go @@ -26,8 +26,6 @@ import ( "testing" "time" - "github.com/milvus-io/milvus/internal/metastore/model" - clientv3 
"go.etcd.io/etcd/client/v3" "github.com/golang/protobuf/proto" @@ -36,6 +34,7 @@ import ( rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client" "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/milvus-io/milvus/internal/proto/etcdpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/milvuspb" "github.com/milvus-io/milvus/internal/proto/proxypb" @@ -193,7 +192,7 @@ func TestGrpcService(t *testing.T) { var binlogLock sync.Mutex binlogPathArray := make([]string, 0, 16) - core.CallBuildIndexService = func(ctx context.Context, segID typeutil.UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (typeutil.UniqueID, error) { + core.CallBuildIndexService = func(ctx context.Context, segID typeutil.UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (typeutil.UniqueID, error) { binlogLock.Lock() defer binlogLock.Unlock() binlogPathArray = append(binlogPathArray, binlog...) @@ -474,7 +473,7 @@ func TestGrpcService(t *testing.T) { assert.Nil(t, err) assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode) assert.Equal(t, collName, rsp.Schema.Name) - assert.Equal(t, collMeta.CollectionID, rsp.CollectionID) + assert.Equal(t, collMeta.ID, rsp.CollectionID) }) t.Run("show collection", func(t *testing.T) { @@ -511,8 +510,8 @@ func TestGrpcService(t *testing.T) { assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) assert.Nil(t, err) - assert.Equal(t, 2, len(collMeta.Partitions)) - partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0) + assert.Equal(t, 2, len(collMeta.PartitionIDs)) + partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[1], 0) assert.Nil(t, err) assert.Equal(t, partName, partName2) assert.Equal(t, 1, len(collectionMetaCache)) @@ -548,7 +547,7 @@ func TestGrpcService(t *testing.T) { }, DbName: "testDb", CollectionName: collName, - CollectionID: coll.CollectionID, + CollectionID: coll.ID, } rsp, err := cli.ShowPartitions(ctx, req) assert.Nil(t, err) @@ -560,8 +559,8 @@ func TestGrpcService(t *testing.T) { t.Run("show segment", func(t *testing.T) { coll, err := core.MetaTable.GetCollectionByName(collName, 0) assert.Nil(t, err) - partID := coll.Partitions[1].PartitionID - _, err = core.MetaTable.GetPartitionNameByID(coll.CollectionID, partID, 0) + partID := coll.PartitionIDs[1] + _, err = core.MetaTable.GetPartitionNameByID(coll.ID, partID, 0) assert.Nil(t, err) segLock.Lock() @@ -575,7 +574,7 @@ func TestGrpcService(t *testing.T) { Timestamp: 170, SourceID: 170, }, - CollectionID: coll.CollectionID, + CollectionID: coll.ID, PartitionID: partID, } rsp, err := cli.ShowSegments(ctx, req) @@ -635,7 +634,7 @@ func TestGrpcService(t *testing.T) { Timestamp: 190, SourceID: 190, }, - CollectionID: coll.CollectionID, + CollectionID: coll.ID, SegmentID: 1000, } rsp, err := cli.DescribeSegment(ctx, req) @@ -667,8 +666,8 @@ func TestGrpcService(t *testing.T) { t.Run("flush segment", func(t *testing.T) { coll, err := core.MetaTable.GetCollectionByName(collName, 0) assert.Nil(t, err) - partID := coll.Partitions[1].PartitionID - _, err = core.MetaTable.GetPartitionNameByID(coll.CollectionID, partID, 0) + partID := coll.PartitionIDs[1] + _, err = core.MetaTable.GetPartitionNameByID(coll.ID, partID, 0) assert.Nil(t, err) 
segLock.Lock() @@ -681,7 +680,7 @@ func TestGrpcService(t *testing.T) { }, Segment: &datapb.SegmentInfo{ ID: segID, - CollectionID: coll.CollectionID, + CollectionID: coll.ID, PartitionID: partID, }, } @@ -756,8 +755,8 @@ func TestGrpcService(t *testing.T) { assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode) collMeta, err := core.MetaTable.GetCollectionByName(collName, 0) assert.Nil(t, err) - assert.Equal(t, 1, len(collMeta.Partitions)) - partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0) + assert.Equal(t, 1, len(collMeta.PartitionIDs)) + partName, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[0], 0) assert.Nil(t, err) assert.Equal(t, rootcoord.Params.CommonCfg.DefaultPartitionName, partName) assert.Equal(t, 2, len(collectionMetaCache)) diff --git a/internal/metastore/catalog.go b/internal/metastore/catalog.go deleted file mode 100644 index ec156d8a41..0000000000 --- a/internal/metastore/catalog.go +++ /dev/null @@ -1,37 +0,0 @@ -package metastore - -import ( - "context" - - "github.com/milvus-io/milvus/internal/metastore/model" - "github.com/milvus-io/milvus/internal/util/typeutil" -) - -type Catalog interface { - CreateCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error - GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) - GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error) - ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) - CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool - DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error - - CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error - DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error - - CreateIndex(ctx context.Context, col *model.Collection, index *model.Index) error - AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index) error - DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error - ListIndexes(ctx context.Context) ([]*model.Index, error) - - CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error - DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error - AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error - ListAliases(ctx context.Context) ([]*model.Collection, error) - - GetCredential(ctx context.Context, username string) (*model.Credential, error) - CreateCredential(ctx context.Context, credential *model.Credential) error - DropCredential(ctx context.Context, username string) error - ListCredentials(ctx context.Context) ([]string, error) - - Close() -} diff --git a/internal/metastore/kv/constant.go b/internal/metastore/kv/constant.go deleted file mode 100644 index 2222954c28..0000000000 --- a/internal/metastore/kv/constant.go +++ /dev/null @@ -1,24 +0,0 @@ -package kv - -const ( - // ComponentPrefix prefix for rootcoord component - ComponentPrefix = "root-coord" - - // CollectionMetaPrefix prefix for collection meta - CollectionMetaPrefix = ComponentPrefix + "/collection" - - // 
SegmentIndexMetaPrefix prefix for segment index meta - SegmentIndexMetaPrefix = ComponentPrefix + "/segment-index" - - // IndexMetaPrefix prefix for index meta - IndexMetaPrefix = ComponentPrefix + "/index" - - // CollectionAliasMetaPrefix prefix for collection alias meta - CollectionAliasMetaPrefix = ComponentPrefix + "/collection-alias" - - // UserSubPrefix subpath for credential user - UserSubPrefix = "/credential/users" - - // CredentialPrefix prefix for credential user - CredentialPrefix = ComponentPrefix + UserSubPrefix -) diff --git a/internal/metastore/kv/kv_catalog.go b/internal/metastore/kv/kv_catalog.go deleted file mode 100644 index 05b01f3c0c..0000000000 --- a/internal/metastore/kv/kv_catalog.go +++ /dev/null @@ -1,529 +0,0 @@ -package kv - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "path" - "strconv" - - "github.com/golang/protobuf/proto" - "github.com/milvus-io/milvus/internal/kv" - "github.com/milvus-io/milvus/internal/log" - "github.com/milvus-io/milvus/internal/metastore/model" - pb "github.com/milvus-io/milvus/internal/proto/etcdpb" - "github.com/milvus-io/milvus/internal/proto/internalpb" - "github.com/milvus-io/milvus/internal/proto/schemapb" - "github.com/milvus-io/milvus/internal/util/typeutil" - "go.uber.org/zap" -) - -type Catalog struct { - Txn kv.TxnKV - Snapshot kv.SnapShotKV -} - -func (kc *Catalog) CreateCollection(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { - k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.CollectionID) - collInfo := model.ConvertToCollectionPB(coll) - v1, err := proto.Marshal(collInfo) - if err != nil { - log.Error("create collection marshal fail", zap.String("key", k1), zap.Error(err)) - return err - } - - // save ddOpStr into etcd - kvs := map[string]string{k1: string(v1)} - for k, v := range coll.Extra { - kvs[k] = v - } - - err = kc.Snapshot.MultiSave(kvs, ts) - if err != nil { - log.Error("create collection persist meta fail", zap.String("key", k1), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { - k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.CollectionID) - collInfo := model.ConvertToCollectionPB(coll) - v1, err := proto.Marshal(collInfo) - if err != nil { - log.Error("create partition marshal fail", zap.String("key", k1), zap.Error(err)) - return err - } - - kvs := map[string]string{k1: string(v1)} - err = kc.Snapshot.MultiSave(kvs, ts) - if err != nil { - log.Error("create partition persist meta fail", zap.String("key", k1), zap.Error(err)) - return err - } - - // save ddOpStr into etcd - err = kc.Txn.MultiSave(coll.Extra) - if err != nil { - // will not panic, missing create msg - log.Warn("create partition persist ddop meta fail", zap.Int64("collectionID", coll.CollectionID), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) CreateIndex(ctx context.Context, col *model.Collection, index *model.Index) error { - k1 := path.Join(CollectionMetaPrefix, strconv.FormatInt(col.CollectionID, 10)) - v1, err := proto.Marshal(model.ConvertToCollectionPB(col)) - if err != nil { - log.Error("create index marshal fail", zap.String("key", k1), zap.Error(err)) - return err - } - - k2 := path.Join(IndexMetaPrefix, strconv.FormatInt(index.IndexID, 10)) - v2, err := proto.Marshal(model.ConvertToIndexPB(index)) - if err != nil { - log.Error("create index marshal fail", zap.String("key", k2), zap.Error(err)) - return err - } - meta := 
map[string]string{k1: string(v1), k2: string(v2)} - - err = kc.Txn.MultiSave(meta) - if err != nil { - log.Error("create index persist meta fail", zap.String("key", k1), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index) error { - kvs := make(map[string]string, len(newIndex.SegmentIndexes)) - for _, segmentIndex := range newIndex.SegmentIndexes { - segment := segmentIndex.Segment - k := fmt.Sprintf("%s/%d/%d/%d/%d", SegmentIndexMetaPrefix, newIndex.CollectionID, newIndex.IndexID, segment.PartitionID, segment.SegmentID) - segIdxInfo := &pb.SegmentIndexInfo{ - CollectionID: newIndex.CollectionID, - PartitionID: segment.PartitionID, - SegmentID: segment.SegmentID, - BuildID: segmentIndex.BuildID, - EnableIndex: segmentIndex.EnableIndex, - FieldID: newIndex.FieldID, - IndexID: newIndex.IndexID, - } - - v, err := proto.Marshal(segIdxInfo) - if err != nil { - log.Error("alter index marshal fail", zap.String("key", k), zap.Error(err)) - return err - } - - kvs[k] = string(v) - } - - err := kc.Txn.MultiSave(kvs) - if err != nil { - log.Error("alter index persist meta fail", zap.Any("segmentIndex", newIndex.SegmentIndexes), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error { - k := fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collection.Aliases[0]) - v, err := proto.Marshal(&pb.CollectionInfo{ID: collection.CollectionID, Schema: &schemapb.CollectionSchema{Name: collection.Aliases[0]}}) - if err != nil { - log.Error("create alias marshal fail", zap.String("key", k), zap.Error(err)) - return err - } - - err = kc.Snapshot.Save(k, string(v), ts) - if err != nil { - log.Error("create alias persist meta fail", zap.String("key", k), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) CreateCredential(ctx context.Context, credential *model.Credential) error { - k := fmt.Sprintf("%s/%s", CredentialPrefix, credential.Username) - v, err := json.Marshal(&internalpb.CredentialInfo{EncryptedPassword: credential.EncryptedPassword}) - if err != nil { - log.Error("create credential marshal fail", zap.String("key", k), zap.Error(err)) - return err - } - - err = kc.Txn.Save(k, string(v)) - if err != nil { - log.Error("create credential persist meta fail", zap.String("key", k), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) { - collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionID) - collVal, err := kc.Snapshot.Load(collKey, ts) - if err != nil { - log.Error("get collection meta fail", zap.String("key", collKey), zap.Error(err)) - return nil, err - } - - collMeta := &pb.CollectionInfo{} - err = proto.Unmarshal([]byte(collVal), collMeta) - if err != nil { - log.Error("collection meta marshal fail", zap.String("key", collKey), zap.Error(err)) - return nil, err - } - - return model.ConvertCollectionPBToModel(collMeta, map[string]string{}), nil -} - -func (kc *Catalog) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool { - _, err := kc.GetCollectionByID(ctx, collectionID, ts) - return err == nil -} - -func (kc *Catalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) { - k := fmt.Sprintf("%s/%s", CredentialPrefix, username) - v, err := kc.Txn.Load(k) - if 
err != nil { - log.Warn("get credential meta fail", zap.String("key", k), zap.Error(err)) - return nil, err - } - - credentialInfo := internalpb.CredentialInfo{} - err = json.Unmarshal([]byte(v), &credentialInfo) - if err != nil { - return nil, fmt.Errorf("unmarshal credential info err:%w", err) - } - - return &model.Credential{Username: username, EncryptedPassword: credentialInfo.EncryptedPassword}, nil -} - -func (kc *Catalog) AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error { - return kc.CreateAlias(ctx, collection, ts) -} - -func (kc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error { - delMetakeysSnap := []string{ - fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionInfo.CollectionID), - } - for _, alias := range collectionInfo.Aliases { - delMetakeysSnap = append(delMetakeysSnap, - fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias), - ) - } - - err := kc.Snapshot.MultiSaveAndRemoveWithPrefix(map[string]string{}, delMetakeysSnap, ts) - if err != nil { - log.Error("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err)) - return err - } - - // Txn operation - kvs := map[string]string{} - for k, v := range collectionInfo.Extra { - kvs[k] = v - } - - delMetaKeysTxn := []string{ - fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collectionInfo.CollectionID), - fmt.Sprintf("%s/%d", IndexMetaPrefix, collectionInfo.CollectionID), - } - - err = kc.Txn.MultiSaveAndRemoveWithPrefix(kvs, delMetaKeysTxn) - if err != nil { - log.Warn("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error { - collMeta := model.ConvertToCollectionPB(collectionInfo) - k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collectionInfo.CollectionID, 10)) - v, err := proto.Marshal(collMeta) - if err != nil { - log.Error("drop partition marshal fail", zap.String("key", k), zap.Error(err)) - return err - } - - err = kc.Snapshot.Save(k, string(v), ts) - if err != nil { - log.Error("drop partition update collection meta fail", - zap.Int64("collectionID", collectionInfo.CollectionID), - zap.Int64("partitionID", partitionID), - zap.Error(err)) - return err - } - - var delMetaKeys []string - for _, idxInfo := range collMeta.FieldIndexes { - k := fmt.Sprintf("%s/%d/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, idxInfo.IndexID, partitionID) - delMetaKeys = append(delMetaKeys, k) - } - - // Txn operation - metaTxn := map[string]string{} - for k, v := range collectionInfo.Extra { - metaTxn[k] = v - } - err = kc.Txn.MultiSaveAndRemoveWithPrefix(metaTxn, delMetaKeys) - if err != nil { - log.Warn("drop partition update meta fail", - zap.Int64("collectionID", collectionInfo.CollectionID), - zap.Int64("partitionID", partitionID), - zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error { - collMeta := model.ConvertToCollectionPB(collectionInfo) - k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collectionInfo.CollectionID, 10)) - v, err := proto.Marshal(collMeta) - if err != nil { - log.Error("drop index marshal fail", zap.String("key", k), zap.Error(err)) - return err - } - - saveMeta := 
map[string]string{k: string(v)} - - delMeta := []string{ - fmt.Sprintf("%s/%d/%d", SegmentIndexMetaPrefix, collectionInfo.CollectionID, dropIdxID), - fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, collectionInfo.CollectionID, dropIdxID), - } - - err = kc.Txn.MultiSaveAndRemoveWithPrefix(saveMeta, delMeta) - if err != nil { - log.Error("drop partition update meta fail", - zap.Int64("collectionID", collectionInfo.CollectionID), - zap.Int64("indexID", dropIdxID), - zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) DropCredential(ctx context.Context, username string) error { - k := fmt.Sprintf("%s/%s", CredentialPrefix, username) - err := kc.Txn.Remove(k) - if err != nil { - log.Error("drop credential update meta fail", zap.String("key", k), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error { - delMetakeys := []string{ - fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias), - } - - meta := make(map[string]string) - err := kc.Snapshot.MultiSaveAndRemoveWithPrefix(meta, delMetakeys, ts) - if err != nil { - log.Error("drop alias update meta fail", zap.String("alias", alias), zap.Error(err)) - return err - } - - return nil -} - -func (kc *Catalog) GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) (*model.Collection, error) { - _, vals, err := kc.Snapshot.LoadWithPrefix(CollectionMetaPrefix, ts) - if err != nil { - log.Warn("get collection meta fail", zap.String("collectionName", collectionName), zap.Error(err)) - return nil, err - } - - for _, val := range vals { - colMeta := pb.CollectionInfo{} - err = proto.Unmarshal([]byte(val), &colMeta) - if err != nil { - log.Warn("get collection meta unmarshal fail", zap.String("collectionName", collectionName), zap.Error(err)) - continue - } - if colMeta.Schema.Name == collectionName { - return model.ConvertCollectionPBToModel(&colMeta, map[string]string{}), nil - } - } - - return nil, fmt.Errorf("can't find collection: %s, at timestamp = %d", collectionName, ts) -} - -func (kc *Catalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) { - _, vals, err := kc.Snapshot.LoadWithPrefix(CollectionMetaPrefix, ts) - if err != nil { - log.Error("get collections meta fail", - zap.String("prefix", CollectionMetaPrefix), - zap.Uint64("timestamp", ts), - zap.Error(err)) - return nil, nil - } - - colls := make(map[string]*model.Collection) - for _, val := range vals { - collMeta := pb.CollectionInfo{} - err := proto.Unmarshal([]byte(val), &collMeta) - if err != nil { - log.Warn("unmarshal collection info failed", zap.Error(err)) - continue - } - colls[collMeta.Schema.Name] = model.ConvertCollectionPBToModel(&collMeta, map[string]string{}) - } - - return colls, nil -} - -func (kc *Catalog) ListAliases(ctx context.Context) ([]*model.Collection, error) { - _, values, err := kc.Snapshot.LoadWithPrefix(CollectionAliasMetaPrefix, 0) - if err != nil { - log.Error("get aliases meta fail", zap.String("prefix", CollectionAliasMetaPrefix), zap.Error(err)) - return nil, err - } - - var colls []*model.Collection - for _, value := range values { - aliasInfo := pb.CollectionInfo{} - err = proto.Unmarshal([]byte(value), &aliasInfo) - if err != nil { - log.Warn("unmarshal aliases failed", zap.Error(err)) - continue - } - colls = append(colls, model.ConvertCollectionPBToModel(&aliasInfo, map[string]string{})) - } - - return colls, nil -} - -func (kc 
*Catalog) listSegmentIndexes(ctx context.Context) (map[int64]*model.Index, error) { - _, values, err := kc.Txn.LoadWithPrefix(SegmentIndexMetaPrefix) - if err != nil { - log.Error("list segment index meta fail", zap.String("prefix", SegmentIndexMetaPrefix), zap.Error(err)) - return nil, err - } - - indexes := make(map[int64]*model.Index, len(values)) - for _, value := range values { - if bytes.Equal([]byte(value), SuffixSnapshotTombstone) { - // backward compatibility, IndexMeta used to be in SnapshotKV - continue - } - segmentIndexInfo := pb.SegmentIndexInfo{} - err = proto.Unmarshal([]byte(value), &segmentIndexInfo) - if err != nil { - log.Warn("unmarshal segment index info failed", zap.Error(err)) - continue - } - - newIndex := model.ConvertSegmentIndexPBToModel(&segmentIndexInfo) - oldIndex, ok := indexes[segmentIndexInfo.IndexID] - if ok { - for segID, segmentIdxInfo := range newIndex.SegmentIndexes { - oldIndex.SegmentIndexes[segID] = segmentIdxInfo - } - } else { - indexes[segmentIndexInfo.IndexID] = newIndex - } - } - - return indexes, nil -} - -func (kc *Catalog) listIndexMeta(ctx context.Context) (map[int64]*model.Index, error) { - _, values, err := kc.Txn.LoadWithPrefix(IndexMetaPrefix) - if err != nil { - log.Error("list index meta fail", zap.String("prefix", IndexMetaPrefix), zap.Error(err)) - return nil, err - } - - indexes := make(map[int64]*model.Index, len(values)) - for _, value := range values { - if bytes.Equal([]byte(value), SuffixSnapshotTombstone) { - // backward compatibility, IndexMeta used to be in SnapshotKV - continue - } - meta := pb.IndexInfo{} - err = proto.Unmarshal([]byte(value), &meta) - if err != nil { - log.Warn("unmarshal index info failed", zap.Error(err)) - continue - } - - index := model.ConvertIndexPBToModel(&meta) - if _, ok := indexes[meta.IndexID]; ok { - log.Warn("duplicated index id exists in index meta", zap.Int64("index id", meta.IndexID)) - } - - indexes[meta.IndexID] = index - } - - return indexes, nil -} - -func (kc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) { - indexMeta, err := kc.listIndexMeta(ctx) - if err != nil { - return nil, err - } - - segmentIndexMeta, err := kc.listSegmentIndexes(ctx) - if err != nil { - return nil, err - } - - var indexes []*model.Index - //merge index and segment index - for indexID, index := range indexMeta { - segmentIndex, ok := segmentIndexMeta[indexID] - if ok { - index = model.MergeIndexModel(index, segmentIndex) - delete(segmentIndexMeta, indexID) - } - indexes = append(indexes, index) - } - - // add remain segmentIndexMeta - for _, index := range segmentIndexMeta { - indexes = append(indexes, index) - } - - return indexes, nil -} - -func (kc *Catalog) ListCredentials(ctx context.Context) ([]string, error) { - keys, _, err := kc.Txn.LoadWithPrefix(CredentialPrefix) - if err != nil { - log.Error("list all credential usernames fail", zap.String("prefix", CredentialPrefix), zap.Error(err)) - return nil, err - } - - var usernames []string - for _, path := range keys { - username := typeutil.After(path, UserSubPrefix+"/") - if len(username) == 0 { - log.Warn("no username extract from path:", zap.String("path", path)) - continue - } - usernames = append(usernames, username) - } - - return usernames, nil -} - -func (kc *Catalog) Close() { - // do nothing -} diff --git a/internal/metastore/model/collection.go b/internal/metastore/model/collection.go deleted file mode 100644 index ab22d2c1ec..0000000000 --- a/internal/metastore/model/collection.go +++ /dev/null @@ -1,24 +0,0 @@ -package 
model - -import ( - "github.com/milvus-io/milvus/internal/proto/commonpb" -) - -type Collection struct { - TenantID string - CollectionID int64 - Partitions []*Partition - Name string - Description string - AutoID bool - Fields []*Field - FieldIndexes []*Index - VirtualChannelNames []string - PhysicalChannelNames []string - ShardsNum int32 - StartPositions []*commonpb.KeyDataPair - CreateTime uint64 - ConsistencyLevel commonpb.ConsistencyLevel - Aliases []string - Extra map[string]string // extra kvs -} diff --git a/internal/metastore/model/credential.go b/internal/metastore/model/credential.go deleted file mode 100644 index b52524da32..0000000000 --- a/internal/metastore/model/credential.go +++ /dev/null @@ -1,6 +0,0 @@ -package model - -type Credential struct { - Username string - EncryptedPassword string -} diff --git a/internal/metastore/model/field.go b/internal/metastore/model/field.go deleted file mode 100644 index 122eec6e98..0000000000 --- a/internal/metastore/model/field.go +++ /dev/null @@ -1,17 +0,0 @@ -package model - -import ( - "github.com/milvus-io/milvus/internal/proto/commonpb" - "github.com/milvus-io/milvus/internal/proto/schemapb" -) - -type Field struct { - FieldID int64 - Name string - IsPrimaryKey bool - Description string - DataType schemapb.DataType - TypeParams []*commonpb.KeyValuePair - IndexParams []*commonpb.KeyValuePair - AutoID bool -} diff --git a/internal/metastore/model/index.go b/internal/metastore/model/index.go deleted file mode 100644 index 8cd8e699b7..0000000000 --- a/internal/metastore/model/index.go +++ /dev/null @@ -1,13 +0,0 @@ -package model - -import "github.com/milvus-io/milvus/internal/proto/commonpb" - -type Index struct { - CollectionID int64 - FieldID int64 - IndexID int64 - IndexName string - IndexParams []*commonpb.KeyValuePair - SegmentIndexes map[int64]SegmentIndex //segmentID -> segmentIndex - Extra map[string]string -} diff --git a/internal/metastore/model/model_utils.go b/internal/metastore/model/model_utils.go deleted file mode 100644 index 13e6db61ce..0000000000 --- a/internal/metastore/model/model_utils.go +++ /dev/null @@ -1,237 +0,0 @@ -package model - -import ( - pb "github.com/milvus-io/milvus/internal/proto/etcdpb" - "github.com/milvus-io/milvus/internal/proto/internalpb" - "github.com/milvus-io/milvus/internal/proto/schemapb" -) - -func ConvertToFieldSchemaPB(field *Field) *schemapb.FieldSchema { - return &schemapb.FieldSchema{ - FieldID: field.FieldID, - Name: field.Name, - IsPrimaryKey: field.IsPrimaryKey, - Description: field.Description, - DataType: field.DataType, - TypeParams: field.TypeParams, - IndexParams: field.IndexParams, - AutoID: field.AutoID, - } -} - -func BatchConvertToFieldSchemaPB(fields []*Field) []*schemapb.FieldSchema { - fieldSchemas := make([]*schemapb.FieldSchema, len(fields)) - for idx, field := range fields { - fieldSchemas[idx] = ConvertToFieldSchemaPB(field) - } - return fieldSchemas -} - -func ConvertFieldPBToModel(fieldSchema *schemapb.FieldSchema) *Field { - return &Field{ - FieldID: fieldSchema.FieldID, - Name: fieldSchema.Name, - IsPrimaryKey: fieldSchema.IsPrimaryKey, - Description: fieldSchema.Description, - DataType: fieldSchema.DataType, - TypeParams: fieldSchema.TypeParams, - IndexParams: fieldSchema.IndexParams, - AutoID: fieldSchema.AutoID, - } -} - -func BatchConvertFieldPBToModel(fieldSchemas []*schemapb.FieldSchema) []*Field { - fields := make([]*Field, len(fieldSchemas)) - for idx, fieldSchema := range fieldSchemas { - fields[idx] = ConvertFieldPBToModel(fieldSchema) - } - return 
fields -} - -func ConvertCollectionPBToModel(coll *pb.CollectionInfo, extra map[string]string) *Collection { - partitions := make([]*Partition, len(coll.PartitionIDs)) - for idx := range coll.PartitionIDs { - partitions[idx] = &Partition{ - PartitionID: coll.PartitionIDs[idx], - PartitionName: coll.PartitionNames[idx], - PartitionCreatedTimestamp: coll.PartitionCreatedTimestamps[idx], - } - } - indexes := make([]*Index, len(coll.FieldIndexes)) - for idx, fieldIndexInfo := range coll.FieldIndexes { - indexes[idx] = &Index{ - FieldID: fieldIndexInfo.FiledID, - IndexID: fieldIndexInfo.IndexID, - } - } - return &Collection{ - CollectionID: coll.ID, - Name: coll.Schema.Name, - Description: coll.Schema.Description, - AutoID: coll.Schema.AutoID, - Fields: BatchConvertFieldPBToModel(coll.Schema.Fields), - Partitions: partitions, - FieldIndexes: indexes, - VirtualChannelNames: coll.VirtualChannelNames, - PhysicalChannelNames: coll.PhysicalChannelNames, - ShardsNum: coll.ShardsNum, - ConsistencyLevel: coll.ConsistencyLevel, - CreateTime: coll.CreateTime, - StartPositions: coll.StartPositions, - Extra: extra, - } -} - -func CloneCollectionModel(coll Collection) *Collection { - return &Collection{ - TenantID: coll.TenantID, - CollectionID: coll.CollectionID, - Name: coll.Name, - Description: coll.Description, - AutoID: coll.AutoID, - Fields: coll.Fields, - Partitions: coll.Partitions, - FieldIndexes: coll.FieldIndexes, - VirtualChannelNames: coll.VirtualChannelNames, - PhysicalChannelNames: coll.PhysicalChannelNames, - ShardsNum: coll.ShardsNum, - ConsistencyLevel: coll.ConsistencyLevel, - CreateTime: coll.CreateTime, - StartPositions: coll.StartPositions, - Aliases: coll.Aliases, - Extra: coll.Extra, - } -} - -func ConvertToCollectionPB(coll *Collection) *pb.CollectionInfo { - fields := make([]*schemapb.FieldSchema, len(coll.Fields)) - for idx, field := range coll.Fields { - fields[idx] = &schemapb.FieldSchema{ - FieldID: field.FieldID, - Name: field.Name, - IsPrimaryKey: field.IsPrimaryKey, - Description: field.Description, - DataType: field.DataType, - TypeParams: field.TypeParams, - IndexParams: field.IndexParams, - AutoID: field.AutoID, - } - } - collSchema := &schemapb.CollectionSchema{ - Name: coll.Name, - Description: coll.Description, - AutoID: coll.AutoID, - Fields: fields, - } - partitionIDs := make([]int64, len(coll.Partitions)) - partitionNames := make([]string, len(coll.Partitions)) - partitionCreatedTimestamps := make([]uint64, len(coll.Partitions)) - for idx, partition := range coll.Partitions { - partitionIDs[idx] = partition.PartitionID - partitionNames[idx] = partition.PartitionName - partitionCreatedTimestamps[idx] = partition.PartitionCreatedTimestamp - } - fieldIndexes := make([]*pb.FieldIndexInfo, len(coll.FieldIndexes)) - for idx, index := range coll.FieldIndexes { - fieldIndexes[idx] = &pb.FieldIndexInfo{ - FiledID: index.FieldID, - IndexID: index.IndexID, - } - } - return &pb.CollectionInfo{ - ID: coll.CollectionID, - Schema: collSchema, - PartitionIDs: partitionIDs, - PartitionNames: partitionNames, - FieldIndexes: fieldIndexes, - CreateTime: coll.CreateTime, - VirtualChannelNames: coll.VirtualChannelNames, - PhysicalChannelNames: coll.PhysicalChannelNames, - ShardsNum: coll.ShardsNum, - PartitionCreatedTimestamps: partitionCreatedTimestamps, - ConsistencyLevel: coll.ConsistencyLevel, - StartPositions: coll.StartPositions, - } -} - -func MergeIndexModel(a *Index, b *Index) *Index { - if b.SegmentIndexes != nil { - if a.SegmentIndexes == nil { - a.SegmentIndexes = 
b.SegmentIndexes - } else { - for segID, segmentIndex := range b.SegmentIndexes { - a.SegmentIndexes[segID] = segmentIndex - } - } - } - - if a.CollectionID == 0 && b.CollectionID != 0 { - a.CollectionID = b.CollectionID - } - - if a.FieldID == 0 && b.FieldID != 0 { - a.FieldID = b.FieldID - } - - if a.IndexID == 0 && b.IndexID != 0 { - a.IndexID = b.IndexID - } - - if a.IndexName == "" && b.IndexName != "" { - a.IndexName = b.IndexName - } - - if a.IndexParams == nil && b.IndexParams != nil { - a.IndexParams = b.IndexParams - } - - if a.Extra == nil && b.Extra != nil { - a.Extra = b.Extra - } - - return a -} - -func ConvertSegmentIndexPBToModel(segIndex *pb.SegmentIndexInfo) *Index { - return &Index{ - CollectionID: segIndex.CollectionID, - SegmentIndexes: map[int64]SegmentIndex{ - segIndex.SegmentID: { - Segment: Segment{ - SegmentID: segIndex.SegmentID, - PartitionID: segIndex.PartitionID, - }, - BuildID: segIndex.BuildID, - EnableIndex: segIndex.EnableIndex, - }, - }, - FieldID: segIndex.FieldID, - IndexID: segIndex.IndexID, - } -} - -func ConvertIndexPBToModel(indexInfo *pb.IndexInfo) *Index { - return &Index{ - IndexName: indexInfo.IndexName, - IndexID: indexInfo.IndexID, - IndexParams: indexInfo.IndexParams, - } -} - -func ConvertToIndexPB(index *Index) *pb.IndexInfo { - return &pb.IndexInfo{ - IndexName: index.IndexName, - IndexID: index.IndexID, - IndexParams: index.IndexParams, - } -} - -func ConvertToCredentialPB(cred *Credential) *internalpb.CredentialInfo { - if cred == nil { - return nil - } - return &internalpb.CredentialInfo{ - Username: cred.Username, - EncryptedPassword: cred.EncryptedPassword, - } -} diff --git a/internal/metastore/model/partition.go b/internal/metastore/model/partition.go deleted file mode 100644 index facfefee0c..0000000000 --- a/internal/metastore/model/partition.go +++ /dev/null @@ -1,8 +0,0 @@ -package model - -type Partition struct { - PartitionID int64 - PartitionName string - PartitionCreatedTimestamp uint64 - Extra map[string]string -} diff --git a/internal/metastore/model/segment.go b/internal/metastore/model/segment.go deleted file mode 100644 index 1023c91ec9..0000000000 --- a/internal/metastore/model/segment.go +++ /dev/null @@ -1,25 +0,0 @@ -package model - -import "github.com/milvus-io/milvus/internal/proto/commonpb" - -type Segment struct { - SegmentID int64 - PartitionID int64 - NumRows int64 - MemSize int64 - DmChannel string - CompactionFrom []int64 - CreatedByCompaction bool - SegmentState commonpb.SegmentState - IndexInfos []*SegmentIndex - ReplicaIds []int64 - NodeIds []int64 -} - -type SegmentIndex struct { - Segment - EnableIndex bool - BuildID int64 - IndexSize uint64 - IndexFilePaths []string -} diff --git a/internal/metastore/table/table_catalog.go b/internal/metastore/table/table_catalog.go deleted file mode 100644 index 2993558d22..0000000000 --- a/internal/metastore/table/table_catalog.go +++ /dev/null @@ -1,91 +0,0 @@ -package table - -import ( - "context" - - "github.com/milvus-io/milvus/internal/metastore/model" - "github.com/milvus-io/milvus/internal/util/typeutil" -) - -type Catalog struct { -} - -func (tc *Catalog) CreateCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) GetCollectionByID(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) { - return nil, nil -} - -func (tc *Catalog) GetCollectionByName(ctx context.Context, collectionName string, ts typeutil.Timestamp) 
(*model.Collection, error) { - return nil, nil -} - -func (tc *Catalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) { - return nil, nil -} - -func (tc *Catalog) CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool { - return false -} - -func (tc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) CreatePartition(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) DropPartition(ctx context.Context, collectionInfo *model.Collection, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) AlterIndex(ctx context.Context, index *model.Index) error { - return nil -} - -func (tc *Catalog) DropIndex(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) ListIndexes(ctx context.Context) ([]*model.Index, error) { - return nil, nil -} - -func (tc *Catalog) CreateAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) DropAlias(ctx context.Context, collectionID typeutil.UniqueID, alias string, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) AlterAlias(ctx context.Context, collection *model.Collection, ts typeutil.Timestamp) error { - return nil -} - -func (tc *Catalog) ListAliases(ctx context.Context) ([]*model.Collection, error) { - return nil, nil -} - -func (tc *Catalog) GetCredential(ctx context.Context, username string) (*model.Credential, error) { - return nil, nil -} - -func (tc *Catalog) CreateCredential(ctx context.Context, credential *model.Credential) error { - return nil -} - -func (tc *Catalog) DropCredential(ctx context.Context, username string) error { - return nil -} - -func (tc *Catalog) ListCredentials(ctx context.Context) ([]string, error) { - return nil, nil -} - -func (tc *Catalog) Close() { - -} diff --git a/internal/metastore/kv/meta_snapshot.go b/internal/rootcoord/meta_snapshot.go similarity index 88% rename from internal/metastore/kv/meta_snapshot.go rename to internal/rootcoord/meta_snapshot.go index 4533c48b87..4df01fb647 100644 --- a/internal/metastore/kv/meta_snapshot.go +++ b/internal/rootcoord/meta_snapshot.go @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
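The rename below moves the timestamp-aware snapshot KV from internal/metastore/kv back into rootcoord and un-exports it (MetaSnapshot becomes metaSnapshot, NewMetaSnapshot becomes newMetaSnapshot), so only the rootcoord package can construct it. A minimal sketch of the intended wiring, assuming an init-time helper; buildMetaTable, etcdCli, txnKV, rootPath, and tsKey are illustrative names, not part of this change:

// buildMetaTable sketches how the un-exported snapshot is assembled inside
// the rootcoord package after this move (sketch only, not from this diff).
func buildMetaTable(etcdCli *clientv3.Client, txnKV kv.TxnKV, rootPath, tsKey string) (*MetaTable, error) {
	// newMetaSnapshot replays the timestamp->revision mapping from etcd so
	// later Load(key, ts) calls can resolve the matching etcd revision.
	ss, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 1024)
	if err != nil {
		return nil, err
	}
	// The snapshot KV backs versioned collection metadata; the plain txn KV
	// holds non-versioned entries such as proxy, index, and credential meta.
	return NewMetaTable(txnKV, ss)
}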
-package kv +package rootcoord import ( "context" @@ -40,7 +40,7 @@ type rtPair struct { ts typeutil.Timestamp } -type MetaSnapshot struct { +type metaSnapshot struct { cli *clientv3.Client root string tsKey string @@ -52,11 +52,11 @@ type MetaSnapshot struct { numTs int } -func NewMetaSnapshot(cli *clientv3.Client, root, tsKey string, bufSize int) (*MetaSnapshot, error) { +func newMetaSnapshot(cli *clientv3.Client, root, tsKey string, bufSize int) (*metaSnapshot, error) { if bufSize <= 0 { bufSize = 1024 } - ms := &MetaSnapshot{ + ms := &metaSnapshot{ cli: cli, root: root, tsKey: tsKey, @@ -72,7 +72,7 @@ func NewMetaSnapshot(cli *clientv3.Client, root, tsKey string, bufSize int) (*Me return ms, nil } -func (ms *MetaSnapshot) loadTs() error { +func (ms *metaSnapshot) loadTs() error { ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) defer cancel() @@ -115,12 +115,12 @@ func (ms *MetaSnapshot) loadTs() error { return nil } if curVer == version { - log.Debug("Snapshot found save version with different revision", zap.Int64("revision", revision), zap.Int64("version", version)) + log.Debug("snapshot found save version with different revision", zap.Int64("revision", revision), zap.Int64("version", version)) } strTs := string(resp.Kvs[0].Value) if strTs == "0" { //#issue 7150, index building inserted "0", skipping - //this is a special fix for backward compatibility, the previous version will put 0 ts into the Snapshot building index + //this is a special fix for backward compatibility, the previous version will put 0 ts into the snapshot building index continue } curTs, err := strconv.ParseUint(strTs, 10, 64) @@ -139,16 +139,16 @@ func (ms *MetaSnapshot) loadTs() error { return nil } -func (ms *MetaSnapshot) maxTs() typeutil.Timestamp { +func (ms *metaSnapshot) maxTs() typeutil.Timestamp { return ms.ts2Rev[ms.maxPos].ts } -func (ms *MetaSnapshot) minTs() typeutil.Timestamp { +func (ms *metaSnapshot) minTs() typeutil.Timestamp { return ms.ts2Rev[ms.minPos].ts } -func (ms *MetaSnapshot) initTs(rev int64, ts typeutil.Timestamp) { - log.Debug("init meta Snapshot ts", zap.Int64("rev", rev), zap.Uint64("ts", ts)) +func (ms *metaSnapshot) initTs(rev int64, ts typeutil.Timestamp) { + log.Debug("init meta snapshot ts", zap.Int64("rev", rev), zap.Uint64("ts", ts)) if ms.numTs == 0 { ms.maxPos = len(ms.ts2Rev) - 1 ms.minPos = len(ms.ts2Rev) - 1 @@ -163,7 +163,7 @@ func (ms *MetaSnapshot) initTs(rev int64, ts typeutil.Timestamp) { } } -func (ms *MetaSnapshot) putTs(rev int64, ts typeutil.Timestamp) { +func (ms *metaSnapshot) putTs(rev int64, ts typeutil.Timestamp) { log.Debug("put meta snapshto ts", zap.Int64("rev", rev), zap.Uint64("ts", ts)) ms.maxPos++ if ms.maxPos == len(ms.ts2Rev) { @@ -182,7 +182,7 @@ func (ms *MetaSnapshot) putTs(rev int64, ts typeutil.Timestamp) { } } -func (ms *MetaSnapshot) searchOnCache(ts typeutil.Timestamp, start, length int) int64 { +func (ms *metaSnapshot) searchOnCache(ts typeutil.Timestamp, start, length int) int64 { if length == 1 { return ms.ts2Rev[start].rev } @@ -208,7 +208,7 @@ func (ms *MetaSnapshot) searchOnCache(ts typeutil.Timestamp, start, length int) } } -func (ms *MetaSnapshot) getRevOnCache(ts typeutil.Timestamp) int64 { +func (ms *metaSnapshot) getRevOnCache(ts typeutil.Timestamp) int64 { if ms.numTs == 0 { return 0 } @@ -236,7 +236,7 @@ func (ms *MetaSnapshot) getRevOnCache(ts typeutil.Timestamp) int64 { return 0 } -func (ms *MetaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 { +func (ms *metaSnapshot) getRevOnEtcd(ts 
typeutil.Timestamp, rev int64) int64 { if rev < 2 { return 0 } @@ -265,7 +265,7 @@ func (ms *MetaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 { return 0 } -func (ms *MetaSnapshot) getRev(ts typeutil.Timestamp) (int64, error) { +func (ms *metaSnapshot) getRev(ts typeutil.Timestamp) (int64, error) { rev := ms.getRevOnCache(ts) if rev > 0 { return rev, nil @@ -278,7 +278,7 @@ func (ms *MetaSnapshot) getRev(ts typeutil.Timestamp) (int64, error) { return 0, fmt.Errorf("can't find revision on ts=%d", ts) } -func (ms *MetaSnapshot) Save(key, value string, ts typeutil.Timestamp) error { +func (ms *metaSnapshot) Save(key, value string, ts typeutil.Timestamp) error { ms.lock.Lock() defer ms.lock.Unlock() ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) @@ -297,7 +297,7 @@ func (ms *MetaSnapshot) Save(key, value string, ts typeutil.Timestamp) error { return nil } -func (ms *MetaSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) { +func (ms *metaSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) { ms.lock.RLock() defer ms.lock.RUnlock() ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) @@ -327,7 +327,7 @@ func (ms *MetaSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) return string(resp.Kvs[0].Value), nil } -func (ms *MetaSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error { +func (ms *metaSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error { ms.lock.Lock() defer ms.lock.Unlock() ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) @@ -348,7 +348,7 @@ func (ms *MetaSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) return nil } -func (ms *MetaSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) { +func (ms *metaSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) { ms.lock.RLock() defer ms.lock.RUnlock() ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) @@ -385,7 +385,7 @@ func (ms *MetaSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]str return keys, values, nil } -func (ms *MetaSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error { +func (ms *metaSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error { ms.lock.Lock() defer ms.lock.Unlock() ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) diff --git a/internal/metastore/kv/meta_snapshot_test.go b/internal/rootcoord/meta_snapshot_test.go similarity index 92% rename from internal/metastore/kv/meta_snapshot_test.go rename to internal/rootcoord/meta_snapshot_test.go index cad594321f..e4ec59649f 100644 --- a/internal/metastore/kv/meta_snapshot_test.go +++ b/internal/rootcoord/meta_snapshot_test.go @@ -14,32 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
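The moved tests below exercise time-travel reads against the renamed metaSnapshot. A minimal usage sketch of the semantics they assert, assuming ms was built with newMetaSnapshot as in TestLoad; the keys, values, and timestamps here are illustrative:

// Each Save records the value together with the write timestamp.
_ = ms.Save("key", "value-10", 10)
_ = ms.Save("key", "value-20", 20)

// Load resolves the newest revision whose timestamp is <= the requested ts.
v, _ := ms.Load("key", 15) // returns "value-10"
v, _ = ms.Load("key", 25)  // returns "value-20"
_ = v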
-package kv +package rootcoord import ( "context" "fmt" "math/rand" - "os" "path" "testing" "time" - "github.com/milvus-io/milvus/internal/util/paramtable" - "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/typeutil" "github.com/stretchr/testify/assert" ) -var Params paramtable.ComponentParam - -func TestMain(m *testing.M) { - Params.Init() - code := m.Run() - os.Exit(code) -} - func TestMetaSnapshot(t *testing.T) { rand.Seed(time.Now().UnixNano()) randVal := rand.Int() @@ -57,7 +46,7 @@ func TestMetaSnapshot(t *testing.T) { return vtso } - ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 4) + ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 4) assert.Nil(t, err) assert.NotNil(t, ms) @@ -71,13 +60,13 @@ func TestMetaSnapshot(t *testing.T) { assert.Nil(t, err) } - ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 4) + ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 4) assert.Nil(t, err) assert.NotNil(t, ms) } func TestSearchOnCache(t *testing.T) { - ms := &MetaSnapshot{} + ms := &metaSnapshot{} for i := 0; i < 8; i++ { ms.ts2Rev = append(ms.ts2Rev, rtPair{ @@ -98,7 +87,7 @@ func TestSearchOnCache(t *testing.T) { } func TestGetRevOnCache(t *testing.T) { - ms := &MetaSnapshot{} + ms := &metaSnapshot{} ms.ts2Rev = make([]rtPair, 7) ms.initTs(7, 16) ms.initTs(6, 14) @@ -192,7 +181,7 @@ func TestGetRevOnEtcd(t *testing.T) { assert.Nil(t, err) defer etcdCli.Close() - ms := MetaSnapshot{ + ms := metaSnapshot{ cli: etcdCli, root: rootPath, tsKey: tsKey, @@ -241,7 +230,7 @@ func TestLoad(t *testing.T) { return vtso } - ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 7) + ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 7) assert.Nil(t, err) assert.NotNil(t, ms) @@ -261,7 +250,7 @@ func TestLoad(t *testing.T) { assert.Nil(t, err) assert.Equal(t, "value-19", val) - ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 11) + ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 11) assert.Nil(t, err) assert.NotNil(t, ms) @@ -289,7 +278,7 @@ func TestMultiSave(t *testing.T) { return vtso } - ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 7) + ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 7) assert.Nil(t, err) assert.NotNil(t, ms) @@ -320,7 +309,7 @@ func TestMultiSave(t *testing.T) { assert.Equal(t, vals[0], "v1-19") assert.Equal(t, vals[1], "v2-19") - ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 11) + ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 11) assert.Nil(t, err) assert.NotNil(t, ms) @@ -354,7 +343,7 @@ func TestMultiSaveAndRemoveWithPrefix(t *testing.T) { } defer etcdCli.Close() - ms, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 7) + ms, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 7) assert.Nil(t, err) assert.NotNil(t, ms) @@ -392,7 +381,7 @@ func TestMultiSaveAndRemoveWithPrefix(t *testing.T) { assert.Equal(t, 39-i, len(vals)) } - ms, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 11) + ms, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 11) assert.Nil(t, err) assert.NotNil(t, ms) @@ -426,7 +415,7 @@ func TestTsBackward(t *testing.T) { assert.Nil(t, err) defer etcdCli.Close() - kv, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024) + kv, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 1024) assert.Nil(t, err) err = kv.loadTs() @@ -436,7 +425,7 @@ func TestTsBackward(t *testing.T) { kv.Save("a", "c", 99) // backward kv.Save("a", "d", 200) - kv, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024) + kv, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 1024) assert.Error(t, err) } @@ -453,7 
+442,7 @@ func TestFix7150(t *testing.T) { assert.Nil(t, err) defer etcdCli.Close() - kv, err := NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024) + kv, err := newMetaSnapshot(etcdCli, rootPath, tsKey, 1024) assert.Nil(t, err) err = kv.loadTs() @@ -463,7 +452,7 @@ func TestFix7150(t *testing.T) { kv.Save("a", "c", 0) // bug introduced kv.Save("a", "d", 200) - kv, err = NewMetaSnapshot(etcdCli, rootPath, tsKey, 1024) + kv, err = newMetaSnapshot(etcdCli, rootPath, tsKey, 1024) assert.Nil(t, err) err = kv.loadTs() assert.Nil(t, err) diff --git a/internal/rootcoord/meta_table.go b/internal/rootcoord/meta_table.go index 6745fde8a5..6af2a0df5d 100644 --- a/internal/rootcoord/meta_table.go +++ b/internal/rootcoord/meta_table.go @@ -17,33 +17,53 @@ package rootcoord import ( - "context" + "bytes" + "encoding/json" "fmt" + "path" + "strconv" "sync" + "github.com/golang/protobuf/proto" + "go.uber.org/zap" + "github.com/milvus-io/milvus/internal/kv" "github.com/milvus-io/milvus/internal/log" - "github.com/milvus-io/milvus/internal/metastore" - kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv" - "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/proto/commonpb" pb "github.com/milvus-io/milvus/internal/proto/etcdpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/milvuspb" "github.com/milvus-io/milvus/internal/proto/schemapb" "github.com/milvus-io/milvus/internal/util/typeutil" - "go.uber.org/zap" ) const ( + // ComponentPrefix prefix for rootcoord component + ComponentPrefix = "root-coord" + + // ProxyMetaPrefix prefix for proxy meta + ProxyMetaPrefix = ComponentPrefix + "/proxy" + + // CollectionMetaPrefix prefix for collection meta + CollectionMetaPrefix = ComponentPrefix + "/collection" + + // SegmentIndexMetaPrefix prefix for segment index meta + SegmentIndexMetaPrefix = ComponentPrefix + "/segment-index" + + // IndexMetaPrefix prefix for index meta + IndexMetaPrefix = ComponentPrefix + "/index" + + // CollectionAliasMetaPrefix prefix for collection alias meta + CollectionAliasMetaPrefix = ComponentPrefix + "/collection-alias" + // TimestampPrefix prefix for timestamp - TimestampPrefix = kvmetestore.ComponentPrefix + "/timestamp" + TimestampPrefix = ComponentPrefix + "/timestamp" // DDOperationPrefix prefix for DD operation - DDOperationPrefix = kvmetestore.ComponentPrefix + "/dd-operation" + DDOperationPrefix = ComponentPrefix + "/dd-operation" // DDMsgSendPrefix prefix to indicate whether DD msg has been send - DDMsgSendPrefix = kvmetestore.ComponentPrefix + "/dd-msg-send" + DDMsgSendPrefix = ComponentPrefix + "/dd-msg-send" // CreateCollectionDDType name of DD type for create collection CreateCollectionDDType = "CreateCollection" @@ -57,6 +77,12 @@ const ( // DropPartitionDDType name of DD type for drop partition DropPartitionDDType = "DropPartition" + // UserSubPrefix subpath for credential user + UserSubPrefix = "/credential/users" + + // CredentialPrefix prefix for credential user + CredentialPrefix = ComponentPrefix + UserSubPrefix + // DefaultIndexType name of default index type for scalar field DefaultIndexType = "STL_SORT" @@ -66,18 +92,15 @@ const ( // MetaTable store all rootCoord meta info type MetaTable struct { - ctx context.Context - txn kv.TxnKV // client of a reliable txnkv service, i.e. etcd client - snapshot kv.SnapShotKV // client of a reliable snapshotkv service, i.e. 
etcd client - catalog metastore.Catalog - - proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta - collID2Meta map[typeutil.UniqueID]model.Collection // collection id -> collection meta - collName2ID map[string]typeutil.UniqueID // collection name to collection id - collAlias2ID map[string]typeutil.UniqueID // collection alias to collection id - partID2SegID map[typeutil.UniqueID]map[typeutil.UniqueID]bool // partition id -> segment_id -> bool - segID2IndexMeta map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index // collection id/index_id/partition_id/segment_id -> meta - indexID2Meta map[typeutil.UniqueID]model.Index // collection id/index_id -> meta + txn kv.TxnKV // client of a reliable txnkv service, i.e. etcd client + snapshot kv.SnapShotKV // client of a reliable snapshotkv service, i.e. etcd client + proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta + collID2Meta map[typeutil.UniqueID]pb.CollectionInfo // collection id -> collection meta + collName2ID map[string]typeutil.UniqueID // collection name to collection id + collAlias2ID map[string]typeutil.UniqueID // collection alias to collection id + partID2SegID map[typeutil.UniqueID]map[typeutil.UniqueID]bool // partition id -> segment_id -> bool + segID2IndexMeta map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo // collection id/index_id/partition_id/segment_id -> meta + indexID2Meta map[typeutil.UniqueID]pb.IndexInfo // collection id/index_id -> meta proxyLock sync.RWMutex ddLock sync.RWMutex @@ -86,12 +109,10 @@ type MetaTable struct { // NewMetaTable creates meta table for rootcoord, which stores all in-memory information // for collection, partition, segment, index etc. -func NewMetaTable(ctx context.Context, txn kv.TxnKV, snap kv.SnapShotKV) (*MetaTable, error) { +func NewMetaTable(txn kv.TxnKV, snap kv.SnapShotKV) (*MetaTable, error) { mt := &MetaTable{ - ctx: ctx, txn: txn, snapshot: snap, - catalog: &kvmetestore.Catalog{Txn: txn, Snapshot: snap}, proxyLock: sync.RWMutex{}, ddLock: sync.RWMutex{}, credLock: sync.RWMutex{}, @@ -105,86 +126,195 @@ func NewMetaTable(ctx context.Context, txn kv.TxnKV, snap kv.SnapShotKV) (*MetaT func (mt *MetaTable) reloadFromKV() error { mt.proxyID2Meta = make(map[typeutil.UniqueID]pb.ProxyMeta) - mt.collID2Meta = make(map[typeutil.UniqueID]model.Collection) + mt.collID2Meta = make(map[typeutil.UniqueID]pb.CollectionInfo) mt.collName2ID = make(map[string]typeutil.UniqueID) mt.collAlias2ID = make(map[string]typeutil.UniqueID) mt.partID2SegID = make(map[typeutil.UniqueID]map[typeutil.UniqueID]bool) - mt.segID2IndexMeta = make(map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index) - mt.indexID2Meta = make(map[typeutil.UniqueID]model.Index) + mt.segID2IndexMeta = make(map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo) + mt.indexID2Meta = make(map[typeutil.UniqueID]pb.IndexInfo) - collMap, err := mt.catalog.ListCollections(mt.ctx, 0) + _, values, err := mt.txn.LoadWithPrefix(ProxyMetaPrefix) if err != nil { return err } - for _, coll := range collMap { - mt.collID2Meta[coll.CollectionID] = *coll - mt.collName2ID[coll.Name] = coll.CollectionID + + for _, value := range values { + if bytes.Equal([]byte(value), suffixSnapshotTombstone) { + // backward compatibility, IndexMeta used to be in SnapshotKV + continue + } + proxyMeta := pb.ProxyMeta{} + err = proto.Unmarshal([]byte(value), &proxyMeta) + if err != nil { + return fmt.Errorf("rootcoord Unmarshal pb.ProxyMeta err:%w", err) + } + mt.proxyID2Meta[proxyMeta.ID] = proxyMeta } 
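	// The remaining reload steps follow the same shape as the proxy-meta load
	// above: LoadWithPrefix over each meta prefix, skip suffixSnapshotTombstone
	// markers for families that used to live in the SnapshotKV, proto.Unmarshal
	// every value, and repopulate the in-memory maps (collID2Meta, partID2SegID,
	// segID2IndexMeta, indexID2Meta, collAlias2ID).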
-	indexes, err := mt.catalog.ListIndexes(mt.ctx)
+	_, values, err = mt.snapshot.LoadWithPrefix(CollectionMetaPrefix, 0)
 	if err != nil {
 		return err
 	}
-	for _, index := range indexes {
-		for _, segIndexInfo := range index.SegmentIndexes {
-			// update partID2SegID
-			segIDMap, ok := mt.partID2SegID[segIndexInfo.Segment.PartitionID]
-			if ok {
-				segIDMap[segIndexInfo.Segment.SegmentID] = true
-			} else {
-				idMap := make(map[typeutil.UniqueID]bool)
-				idMap[segIndexInfo.Segment.SegmentID] = true
-				mt.partID2SegID[segIndexInfo.Segment.PartitionID] = idMap
-			}
-			// update segID2IndexMeta
-			idx, ok := mt.segID2IndexMeta[segIndexInfo.Segment.SegmentID]
-			if ok {
-				idx[index.IndexID] = *index
-			} else {
-				meta := make(map[typeutil.UniqueID]model.Index)
-				meta[index.IndexID] = *index
-				mt.segID2IndexMeta[segIndexInfo.Segment.SegmentID] = meta
-			}
+	for _, value := range values {
+		collInfo := pb.CollectionInfo{}
+		err = proto.Unmarshal([]byte(value), &collInfo)
+		if err != nil {
+			return fmt.Errorf("rootcoord Unmarshal pb.CollectionInfo err:%w", err)
+		}
+		mt.collID2Meta[collInfo.ID] = collInfo
+		mt.collName2ID[collInfo.Schema.Name] = collInfo.ID
+	}
+
+	_, values, err = mt.txn.LoadWithPrefix(SegmentIndexMetaPrefix)
+	if err != nil {
+		return err
+	}
+	for _, value := range values {
+		if bytes.Equal([]byte(value), suffixSnapshotTombstone) {
+			// backward compatibility, IndexMeta used to be in SnapshotKV
+			continue
+		}
+		segmentIndexInfo := pb.SegmentIndexInfo{}
+		err = proto.Unmarshal([]byte(value), &segmentIndexInfo)
+		if err != nil {
+			return fmt.Errorf("rootcoord Unmarshal pb.SegmentIndexInfo err:%w", err)
 		}
-		mt.indexID2Meta[index.IndexID] = *index
+		// update partID2SegID
+		segIDMap, ok := mt.partID2SegID[segmentIndexInfo.PartitionID]
+		if ok {
+			segIDMap[segmentIndexInfo.SegmentID] = true
+		} else {
+			idMap := make(map[typeutil.UniqueID]bool)
+			idMap[segmentIndexInfo.SegmentID] = true
+			mt.partID2SegID[segmentIndexInfo.PartitionID] = idMap
+		}
+
+		// update segID2IndexMeta
+		idx, ok := mt.segID2IndexMeta[segmentIndexInfo.SegmentID]
+		if ok {
+			idx[segmentIndexInfo.IndexID] = segmentIndexInfo
+		} else {
+			meta := make(map[typeutil.UniqueID]pb.SegmentIndexInfo)
+			meta[segmentIndexInfo.IndexID] = segmentIndexInfo
+			mt.segID2IndexMeta[segmentIndexInfo.SegmentID] = meta
+		}
 	}
 
-	collAliases, err := mt.catalog.ListAliases(mt.ctx)
+	_, values, err = mt.txn.LoadWithPrefix(IndexMetaPrefix)
 	if err != nil {
 		return err
 	}
-	for _, aliasInfo := range collAliases {
-		mt.collAlias2ID[aliasInfo.Name] = aliasInfo.CollectionID
+	for _, value := range values {
+		if bytes.Equal([]byte(value), suffixSnapshotTombstone) {
+			// backward compatibility, IndexMeta used to be in SnapshotKV
+			continue
+		}
+		meta := pb.IndexInfo{}
+		err = proto.Unmarshal([]byte(value), &meta)
+		if err != nil {
+			return fmt.Errorf("rootcoord Unmarshal pb.IndexInfo err:%w", err)
+		}
+		mt.indexID2Meta[meta.IndexID] = meta
+	}
+
+	_, values, err = mt.snapshot.LoadWithPrefix(CollectionAliasMetaPrefix, 0)
+	if err != nil {
+		return err
+	}
+	for _, value := range values {
+		aliasInfo := pb.CollectionInfo{}
+		err = proto.Unmarshal([]byte(value), &aliasInfo)
+		if err != nil {
+			return fmt.Errorf("rootcoord Unmarshal pb.AliasInfo err:%w", err)
+		}
+		mt.collAlias2ID[aliasInfo.Schema.Name] = aliasInfo.ID
 	}
 
 	log.Debug("reload meta table from KV successfully")
 	return nil
 }
 
+// AddProxy add proxy
+func (mt *MetaTable) AddProxy(po *pb.ProxyMeta) error {
+	mt.proxyLock.Lock()
+	defer mt.proxyLock.Unlock()
+
+	k := fmt.Sprintf("%s/%d", ProxyMetaPrefix, po.ID)
+	v, err := proto.Marshal(po)
+	if err != nil {
+		log.Error("Failed to marshal ProxyMeta in AddProxy", zap.Error(err))
+		return err
+	}
+
+	err = mt.txn.Save(k, string(v))
+	if err != nil {
+		log.Error("SnapShotKV Save fail", zap.Error(err))
+		panic("SnapShotKV Save fail")
+	}
+	mt.proxyID2Meta[po.ID] = *po
+	return nil
+}
+
 // AddCollection add collection
-func (mt *MetaTable) AddCollection(coll *model.Collection, ts typeutil.Timestamp, ddOpStr string) error {
+func (mt *MetaTable) AddCollection(coll *pb.CollectionInfo, ts typeutil.Timestamp, idx []*pb.IndexInfo, ddOpStr string) error {
 	mt.ddLock.Lock()
 	defer mt.ddLock.Unlock()
 
-	if _, ok := mt.collName2ID[coll.Name]; ok {
-		return fmt.Errorf("collection %s exist", coll.Name)
+	if len(coll.PartitionIDs) != len(coll.PartitionNames) ||
+		len(coll.PartitionIDs) != len(coll.PartitionCreatedTimestamps) ||
+		(len(coll.PartitionIDs) != 1 && len(coll.PartitionIDs) != 0) {
+		return fmt.Errorf("partition parameters' length mis-match when creating collection")
+	}
+	if _, ok := mt.collName2ID[coll.Schema.Name]; ok {
+		return fmt.Errorf("collection %s exist", coll.Schema.Name)
+	}
+	if len(coll.FieldIndexes) != len(idx) {
+		return fmt.Errorf("incorrect index id when creating collection")
 	}
-
-	mt.collID2Meta[coll.CollectionID] = *coll
-	mt.collName2ID[coll.Name] = coll.CollectionID
 	coll.CreateTime = ts
-	for _, partition := range coll.Partitions {
-		partition.PartitionCreatedTimestamp = ts
+	if len(coll.PartitionCreatedTimestamps) == 1 {
+		coll.PartitionCreatedTimestamps[0] = ts
+	}
+	mt.collID2Meta[coll.ID] = *coll
+	mt.collName2ID[coll.Schema.Name] = coll.ID
+	for _, i := range idx {
+		mt.indexID2Meta[i.IndexID] = *i
 	}
 
-	meta := map[string]string{}
+	k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.ID)
+	v1, err := proto.Marshal(coll)
+	if err != nil {
+		log.Error("MetaTable AddCollection saveColl Marshal fail",
+			zap.String("key", k1), zap.Error(err))
+		return fmt.Errorf("metaTable AddCollection Marshal fail key:%s, err:%w", k1, err)
+	}
+	meta := map[string]string{k1: string(v1)}
+
+	for _, i := range idx {
+		k := fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, coll.ID, i.IndexID)
+		v, err := proto.Marshal(i)
+		if err != nil {
+			log.Error("MetaTable AddCollection Marshal fail", zap.String("key", k),
+				zap.String("IndexName", i.IndexName), zap.Error(err))
+			return fmt.Errorf("metaTable AddCollection Marshal fail key:%s, err:%w", k, err)
+		}
+		meta[k] = string(v)
+	}
+
+	// save ddOpStr into etcd
 	meta[DDMsgSendPrefix] = "false"
 	meta[DDOperationPrefix] = ddOpStr
-	coll.Extra = meta
-	return mt.catalog.CreateCollection(mt.ctx, coll, ts)
+
+	err = mt.snapshot.MultiSave(meta, ts)
+	if err != nil {
+		log.Error("SnapShotKV MultiSave fail", zap.Error(err))
+		panic("SnapShotKV MultiSave fail")
+	}
+
+	return nil
 }
 
 // DeleteCollection delete collection
@@ -192,26 +322,29 @@ func (mt *MetaTable) DeleteCollection(collID typeutil.UniqueID, ts typeutil.Time
 	mt.ddLock.Lock()
 	defer mt.ddLock.Unlock()
 
-	col, ok := mt.collID2Meta[collID]
+	collMeta, ok := mt.collID2Meta[collID]
 	if !ok {
 		return fmt.Errorf("can't find collection. id = %d", collID)
 	}
 
 	delete(mt.collID2Meta, collID)
-	delete(mt.collName2ID, col.Name)
+	delete(mt.collName2ID, collMeta.Schema.Name)
 
 	// update segID2IndexMeta
-	for _, partition := range col.Partitions {
-		partID := partition.PartitionID
+	for partID := range collMeta.PartitionIDs {
 		if segIDMap, ok := mt.partID2SegID[typeutil.UniqueID(partID)]; ok {
 			for segID := range segIDMap {
 				delete(mt.segID2IndexMeta, segID)
 			}
 		}
+	}
+
+	// update partID2SegID
+	for partID := range collMeta.PartitionIDs {
 		delete(mt.partID2SegID, typeutil.UniqueID(partID))
 	}
 
-	for _, idxInfo := range col.FieldIndexes {
+	for _, idxInfo := range collMeta.FieldIndexes {
 		_, ok := mt.indexID2Meta[idxInfo.IndexID]
 		if !ok {
 			log.Warn("index id not exist", zap.Int64("index id", idxInfo.IndexID))
@@ -224,23 +357,42 @@ func (mt *MetaTable) DeleteCollection(collID typeutil.UniqueID, ts typeutil.Time
 	for alias, cid := range mt.collAlias2ID {
 		if cid == collID {
 			aliases = append(aliases, alias)
-			delete(mt.collAlias2ID, alias)
 		}
 	}
 
+	delMetakeysSnap := []string{
+		fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID),
+	}
+	delMetaKeysTxn := []string{
+		fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collID),
+		fmt.Sprintf("%s/%d", IndexMetaPrefix, collID),
+	}
+
+	for _, alias := range aliases {
+		delete(mt.collAlias2ID, alias)
+		delMetakeysSnap = append(delMetakeysSnap,
+			fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias),
+		)
+	}
+
 	// save ddOpStr into etcd
-	var meta = map[string]string{
+	var saveMeta = map[string]string{
 		DDMsgSendPrefix:   "false",
 		DDOperationPrefix: ddOpStr,
 	}
 
-	collection := &model.Collection{
-		CollectionID: collID,
-		Aliases:      aliases,
-		Extra:        meta,
+	err := mt.snapshot.MultiSaveAndRemoveWithPrefix(map[string]string{}, delMetakeysSnap, ts)
+	if err != nil {
+		log.Error("SnapShotKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
+		panic("SnapShotKV MultiSaveAndRemoveWithPrefix fail")
+	}
+	err = mt.txn.MultiSaveAndRemoveWithPrefix(saveMeta, delMetaKeysTxn)
+	if err != nil {
+		log.Warn("TxnKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
+		//Txn kv fail will no panic here, treated as garbage
 	}
-	return mt.catalog.DropCollection(mt.ctx, collection, ts)
+	return nil
 }
 
 // HasCollection return collection existence
@@ -251,8 +403,9 @@ func (mt *MetaTable) HasCollection(collID typeutil.UniqueID, ts typeutil.Timesta
 		_, ok := mt.collID2Meta[collID]
 		return ok
 	}
-
-	return mt.catalog.CollectionExists(mt.ctx, collID, ts)
+	key := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
+	_, err := mt.snapshot.Load(key, ts)
+	return err == nil
 }
 
 // GetCollectionIDByName returns the collection ID according to its name.
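
The ts argument threaded through the reads above selects the data path: ts == 0 is answered from the in-memory maps, while any non-zero ts is re-read from the SnapShotKV as of that timestamp. A minimal caller-side sketch under that assumption (mt, the collection ID and the timestamps here are hypothetical):

	// hypothetical usage of the timestamp-aware read path
	var collID typeutil.UniqueID = 42
	current := mt.HasCollection(collID, 0)     // in-memory map lookup
	historic := mt.HasCollection(collID, 150)  // snapshot.Load as of ts = 150
	_, _ = current, historic
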
@@ -269,7 +422,7 @@ func (mt *MetaTable) GetCollectionIDByName(cName string) (typeutil.UniqueID, err
 }
 
 // GetCollectionByID return collection meta by collection id
-func (mt *MetaTable) GetCollectionByID(collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*model.Collection, error) {
+func (mt *MetaTable) GetCollectionByID(collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*pb.CollectionInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
@@ -278,14 +431,24 @@ func (mt *MetaTable) GetCollectionByID(collectionID typeutil.UniqueID, ts typeut
 		if !ok {
 			return nil, fmt.Errorf("can't find collection id : %d", collectionID)
 		}
-		return model.CloneCollectionModel(col), nil
+		colCopy := proto.Clone(&col)
+		return colCopy.(*pb.CollectionInfo), nil
 	}
-
-	return mt.catalog.GetCollectionByID(mt.ctx, collectionID, ts)
+	key := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionID)
+	val, err := mt.snapshot.Load(key, ts)
+	if err != nil {
+		return nil, err
+	}
+	colMeta := pb.CollectionInfo{}
+	err = proto.Unmarshal([]byte(val), &colMeta)
+	if err != nil {
+		return nil, err
+	}
+	return &colMeta, nil
 }
 
 // GetCollectionByName return collection meta by collection name
-func (mt *MetaTable) GetCollectionByName(collectionName string, ts typeutil.Timestamp) (*model.Collection, error) {
+func (mt *MetaTable) GetCollectionByName(collectionName string, ts typeutil.Timestamp) (*pb.CollectionInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
@@ -300,28 +463,56 @@ func (mt *MetaTable) GetCollectionByName(collectionName string, ts typeutil.Time
 		if !ok {
 			return nil, fmt.Errorf("can't find collection %s with id %d", collectionName, vid)
 		}
-
-		return model.CloneCollectionModel(col), nil
+		colCopy := proto.Clone(&col)
+		return colCopy.(*pb.CollectionInfo), nil
 	}
-
-	return mt.catalog.GetCollectionByName(mt.ctx, collectionName, ts)
+	_, vals, err := mt.snapshot.LoadWithPrefix(CollectionMetaPrefix, ts)
+	if err != nil {
+		log.Warn("failed to load table from meta snapshot", zap.Error(err))
+		return nil, err
+	}
+	for _, val := range vals {
+		collMeta := pb.CollectionInfo{}
+		err = proto.Unmarshal([]byte(val), &collMeta)
+		if err != nil {
+			log.Warn("unmarshal collection info failed", zap.Error(err))
+			continue
+		}
+		if collMeta.Schema.Name == collectionName {
+			return &collMeta, nil
+		}
+	}
+	return nil, fmt.Errorf("can't find collection: %s, at timestamp = %d", collectionName, ts)
 }
 
 // ListCollections list all collection names
-func (mt *MetaTable) ListCollections(ts typeutil.Timestamp) (map[string]*model.Collection, error) {
+func (mt *MetaTable) ListCollections(ts typeutil.Timestamp) (map[string]*pb.CollectionInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
-	cols := make(map[string]*model.Collection)
+	colls := make(map[string]*pb.CollectionInfo)
 
 	if ts == 0 {
 		for collName, collID := range mt.collName2ID {
-			col := mt.collID2Meta[collID]
-			cols[collName] = model.CloneCollectionModel(col)
+			coll := mt.collID2Meta[collID]
+			colCopy := proto.Clone(&coll)
+			colls[collName] = colCopy.(*pb.CollectionInfo)
 		}
-		return cols, nil
+		return colls, nil
 	}
-
-	return mt.catalog.ListCollections(mt.ctx, ts)
+	_, vals, err := mt.snapshot.LoadWithPrefix(CollectionMetaPrefix, ts)
+	if err != nil {
+		log.Debug("load with prefix error", zap.Uint64("timestamp", ts), zap.Error(err))
+		return nil, nil
+	}
+	for _, val := range vals {
+		collMeta := pb.CollectionInfo{}
+		err := proto.Unmarshal([]byte(val), &collMeta)
+		if err != nil {
+			log.Debug("unmarshal collection info failed", zap.Error(err))
+		}
+		colls[collMeta.Schema.Name] = &collMeta
+	}
+	return colls, nil
 }
 
 // ListAliases list all collection aliases
@@ -371,34 +562,61 @@ func (mt *MetaTable) AddPartition(collID typeutil.UniqueID, partitionName string
 	}
 
 	// number of partition tags (except _default) should be limited to 4096 by default
-	if int64(len(coll.Partitions)) >= Params.RootCoordCfg.MaxPartitionNum {
+	if int64(len(coll.PartitionIDs)) >= Params.RootCoordCfg.MaxPartitionNum {
 		return fmt.Errorf("maximum partition's number should be limit to %d", Params.RootCoordCfg.MaxPartitionNum)
 	}
 
-	for _, p := range coll.Partitions {
-		if p.PartitionID == partitionID {
+	if len(coll.PartitionIDs) != len(coll.PartitionNames) {
+		return fmt.Errorf("len(coll.PartitionIDs)=%d, len(coll.PartitionNames)=%d", len(coll.PartitionIDs), len(coll.PartitionNames))
+	}
+
+	if len(coll.PartitionIDs) != len(coll.PartitionCreatedTimestamps) {
+		return fmt.Errorf("len(coll.PartitionIDs)=%d, len(coll.PartitionCreatedTimestamps)=%d", len(coll.PartitionIDs), len(coll.PartitionCreatedTimestamps))
+	}
+
+	if len(coll.PartitionNames) != len(coll.PartitionCreatedTimestamps) {
+		return fmt.Errorf("len(coll.PartitionNames)=%d, len(coll.PartitionCreatedTimestamps)=%d", len(coll.PartitionNames), len(coll.PartitionCreatedTimestamps))
+	}
+
+	for idx := range coll.PartitionIDs {
+		if coll.PartitionIDs[idx] == partitionID {
 			return fmt.Errorf("partition id = %d already exists", partitionID)
 		}
-		if p.PartitionName == partitionName {
+		if coll.PartitionNames[idx] == partitionName {
 			return fmt.Errorf("partition name = %s already exists", partitionName)
 		}
 		// no necessary to check created timestamp
 	}
 
-	coll.Partitions = append(coll.Partitions,
-		&model.Partition{
-			PartitionID:               partitionID,
-			PartitionName:             partitionName,
-			PartitionCreatedTimestamp: ts,
-		})
+	coll.PartitionIDs = append(coll.PartitionIDs, partitionID)
+	coll.PartitionNames = append(coll.PartitionNames, partitionName)
+	coll.PartitionCreatedTimestamps = append(coll.PartitionCreatedTimestamps, ts)
 	mt.collID2Meta[collID] = coll
 
+	k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
+	v1, err := proto.Marshal(&coll)
+	if err != nil {
+		log.Error("MetaTable AddPartition saveColl Marshal fail",
+			zap.String("key", k1), zap.Error(err))
+		return fmt.Errorf("metaTable AddPartition Marshal fail, k1:%s, err:%w", k1, err)
+	}
+	meta := map[string]string{k1: string(v1)}
 	metaTxn := map[string]string{}
 	// save ddOpStr into etcd
 	metaTxn[DDMsgSendPrefix] = "false"
 	metaTxn[DDOperationPrefix] = ddOpStr
-	coll.Extra = metaTxn
-	return mt.catalog.CreatePartition(mt.ctx, &coll, ts)
+
+	err = mt.snapshot.MultiSave(meta, ts)
+	if err != nil {
+		log.Error("SnapShotKV MultiSave fail", zap.Error(err))
+		panic("SnapShotKV MultiSave fail")
+	}
+	err = mt.txn.MultiSave(metaTxn)
+	if err != nil {
+		// will not panic, missing create msg
+		log.Warn("TxnKV MultiSave fail", zap.Error(err))
+	}
+	return nil
 }
 
 // GetPartitionNameByID return partition name by partition id
@@ -406,25 +624,30 @@ func (mt *MetaTable) GetPartitionNameByID(collID, partitionID typeutil.UniqueID,
 	if ts == 0 {
 		mt.ddLock.RLock()
 		defer mt.ddLock.RUnlock()
-		col, ok := mt.collID2Meta[collID]
+		collMeta, ok := mt.collID2Meta[collID]
 		if !ok {
 			return "", fmt.Errorf("can't find collection id = %d", collID)
 		}
-		for _, partition := range col.Partitions {
-			if partition.PartitionID == partitionID {
-				return partition.PartitionName, nil
+		for idx := range collMeta.PartitionIDs {
+			if collMeta.PartitionIDs[idx] == partitionID {
+				return collMeta.PartitionNames[idx], nil
 			}
 		}
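+		// fall through: the partition id was not in the cached meta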
return "", fmt.Errorf("partition %d does not exist", partitionID) } - - col, err := mt.catalog.GetCollectionByID(mt.ctx, collID, ts) + collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID) + collVal, err := mt.snapshot.Load(collKey, ts) if err != nil { return "", err } - for _, partition := range col.Partitions { - if partition.PartitionID == partitionID { - return partition.PartitionName, nil + collMeta := pb.CollectionInfo{} + err = proto.Unmarshal([]byte(collVal), &collMeta) + if err != nil { + return "", err + } + for idx := range collMeta.PartitionIDs { + if collMeta.PartitionIDs[idx] == partitionID { + return collMeta.PartitionNames[idx], nil } } return "", fmt.Errorf("partition %d does not exist", partitionID) @@ -432,25 +655,30 @@ func (mt *MetaTable) GetPartitionNameByID(collID, partitionID typeutil.UniqueID, func (mt *MetaTable) getPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) { if ts == 0 { - col, ok := mt.collID2Meta[collID] + collMeta, ok := mt.collID2Meta[collID] if !ok { return 0, fmt.Errorf("can't find collection id = %d", collID) } - for _, partition := range col.Partitions { - if partition.PartitionName == partitionName { - return partition.PartitionID, nil + for idx := range collMeta.PartitionIDs { + if collMeta.PartitionNames[idx] == partitionName { + return collMeta.PartitionIDs[idx], nil } } return 0, fmt.Errorf("partition %s does not exist", partitionName) } - - col, err := mt.catalog.GetCollectionByID(mt.ctx, collID, ts) + collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID) + collVal, err := mt.snapshot.Load(collKey, ts) if err != nil { return 0, err } - for _, partition := range col.Partitions { - if partition.PartitionName == partitionName { - return partition.PartitionID, nil + collMeta := pb.CollectionInfo{} + err = proto.Unmarshal([]byte(collVal), &collMeta) + if err != nil { + return 0, err + } + for idx := range collMeta.PartitionIDs { + if collMeta.PartitionNames[idx] == partitionName { + return collMeta.PartitionIDs[idx], nil } } return 0, fmt.Errorf("partition %s does not exist", partitionName) @@ -480,7 +708,7 @@ func (mt *MetaTable) DeletePartition(collID typeutil.UniqueID, partitionName str return 0, fmt.Errorf("default partition cannot be deleted") } - col, ok := mt.collID2Meta[collID] + collMeta, ok := mt.collID2Meta[collID] if !ok { return 0, fmt.Errorf("can't find collection id = %d", collID) } @@ -488,22 +716,27 @@ func (mt *MetaTable) DeletePartition(collID typeutil.UniqueID, partitionName str // check tag exists exist := false - parts := make([]*model.Partition, 0, len(col.Partitions)) - + pd := make([]typeutil.UniqueID, 0, len(collMeta.PartitionIDs)) + pn := make([]string, 0, len(collMeta.PartitionNames)) + pts := make([]uint64, 0, len(collMeta.PartitionCreatedTimestamps)) var partID typeutil.UniqueID - for _, partition := range col.Partitions { - if partition.PartitionName == partitionName { - partID = partition.PartitionID + for idx := range collMeta.PartitionIDs { + if collMeta.PartitionNames[idx] == partitionName { + partID = collMeta.PartitionIDs[idx] exist = true } else { - parts = append(parts, partition) + pd = append(pd, collMeta.PartitionIDs[idx]) + pn = append(pn, collMeta.PartitionNames[idx]) + pts = append(pts, collMeta.PartitionCreatedTimestamps[idx]) } } if !exist { return 0, fmt.Errorf("partition %s does not exist", partitionName) } - col.Partitions = parts - mt.collID2Meta[collID] = col + collMeta.PartitionIDs = pd + collMeta.PartitionNames = 
pn + collMeta.PartitionCreatedTimestamps = pts + mt.collID2Meta[collID] = collMeta // update segID2IndexMeta and partID2SegID if segIDMap, ok := mt.partID2SegID[partID]; ok { @@ -513,56 +746,98 @@ func (mt *MetaTable) DeletePartition(collID typeutil.UniqueID, partitionName str } delete(mt.partID2SegID, partID) + k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collID, 10)) + v, err := proto.Marshal(&collMeta) + if err != nil { + log.Error("MetaTable DeletePartition Marshal collectionMeta fail", + zap.String("key", k), zap.Error(err)) + return 0, fmt.Errorf("metaTable DeletePartition Marshal collectionMeta fail key:%s, err:%w", k, err) + } + var delMetaKeys []string + for _, idxInfo := range collMeta.FieldIndexes { + k := fmt.Sprintf("%s/%d/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, idxInfo.IndexID, partID) + delMetaKeys = append(delMetaKeys, k) + } + metaTxn := make(map[string]string) // save ddOpStr into etcd metaTxn[DDMsgSendPrefix] = "false" metaTxn[DDOperationPrefix] = ddOpStr - col.Extra = metaTxn - err := mt.catalog.DropPartition(mt.ctx, &col, partID, ts) + err = mt.snapshot.Save(k, string(v), ts) if err != nil { - return 0, err + log.Error("SnapShotKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err)) + panic("SnapShotKV MultiSaveAndRemoveWithPrefix fail") + } + err = mt.txn.MultiSaveAndRemoveWithPrefix(metaTxn, delMetaKeys) + if err != nil { + log.Warn("TxnKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err)) + // will not panic, failed txn shall be treated by garbage related logic } return partID, nil } -func (mt *MetaTable) updateSegmentIndexMetaCache(index *model.Index) { - for _, segIdxInfo := range index.SegmentIndexes { - if _, ok := mt.partID2SegID[segIdxInfo.PartitionID]; !ok { - segIDMap := map[typeutil.UniqueID]bool{segIdxInfo.SegmentID: true} - mt.partID2SegID[segIdxInfo.PartitionID] = segIDMap - } else { - mt.partID2SegID[segIdxInfo.PartitionID][segIdxInfo.SegmentID] = true - } - - _, ok := mt.segID2IndexMeta[segIdxInfo.SegmentID] - if !ok { - idxMap := map[typeutil.UniqueID]model.Index{index.IndexID: *index} - mt.segID2IndexMeta[segIdxInfo.SegmentID] = idxMap - } else { - mt.segID2IndexMeta[segIdxInfo.SegmentID][index.IndexID] = *index - } - } -} - -// AlterIndex alter index -func (mt *MetaTable) AlterIndex(newIndex *model.Index) error { +// AddIndex add index +func (mt *MetaTable) AddIndex(segIdxInfo *pb.SegmentIndexInfo) error { mt.ddLock.Lock() defer mt.ddLock.Unlock() - _, ok := mt.collID2Meta[newIndex.CollectionID] + collMeta, ok := mt.collID2Meta[segIdxInfo.CollectionID] if !ok { - return fmt.Errorf("collection id = %d not found", newIndex.CollectionID) + return fmt.Errorf("collection id = %d not found", segIdxInfo.CollectionID) + } + exist := false + for _, fidx := range collMeta.FieldIndexes { + if fidx.IndexID == segIdxInfo.IndexID { + exist = true + break + } + } + if !exist { + return fmt.Errorf("index id = %d not found", segIdxInfo.IndexID) } - oldIndex, ok := mt.indexID2Meta[newIndex.IndexID] - if !ok { - return fmt.Errorf("index id = %d not found", newIndex.IndexID) + if _, ok := mt.partID2SegID[segIdxInfo.PartitionID]; !ok { + segIDMap := map[typeutil.UniqueID]bool{segIdxInfo.SegmentID: true} + mt.partID2SegID[segIdxInfo.PartitionID] = segIDMap } - mt.updateSegmentIndexMetaCache(newIndex) - return mt.catalog.AlterIndex(mt.ctx, &oldIndex, newIndex) + segIdxMap, ok := mt.segID2IndexMeta[segIdxInfo.SegmentID] + if !ok { + idxMap := map[typeutil.UniqueID]pb.SegmentIndexInfo{segIdxInfo.IndexID: *segIdxInfo} + mt.segID2IndexMeta[segIdxInfo.SegmentID] 
= idxMap + } else { + tmpInfo, ok := segIdxMap[segIdxInfo.IndexID] + if ok { + if SegmentIndexInfoEqual(segIdxInfo, &tmpInfo) { + if segIdxInfo.BuildID == tmpInfo.BuildID { + log.Debug("Identical SegmentIndexInfo already exist", zap.Int64("IndexID", segIdxInfo.IndexID)) + return nil + } + return fmt.Errorf("index id = %d exist", segIdxInfo.IndexID) + } + } + } + + mt.segID2IndexMeta[segIdxInfo.SegmentID][segIdxInfo.IndexID] = *segIdxInfo + mt.partID2SegID[segIdxInfo.PartitionID][segIdxInfo.SegmentID] = true + + k := fmt.Sprintf("%s/%d/%d/%d/%d", SegmentIndexMetaPrefix, segIdxInfo.CollectionID, segIdxInfo.IndexID, segIdxInfo.PartitionID, segIdxInfo.SegmentID) + v, err := proto.Marshal(segIdxInfo) + if err != nil { + log.Error("MetaTable AddIndex Marshal segIdxInfo fail", + zap.String("key", k), zap.Error(err)) + return fmt.Errorf("metaTable AddIndex Marshal segIdxInfo fail key:%s, err:%w", k, err) + } + + err = mt.txn.Save(k, string(v)) + if err != nil { + log.Error("SnapShotKV Save fail", zap.Error(err)) + panic("SnapShotKV Save fail") + } + + return nil } // DropIndex drop index @@ -577,7 +852,7 @@ func (mt *MetaTable) DropIndex(collName, fieldName, indexName string) (typeutil. return 0, false, fmt.Errorf("collection name = %s not exist", collName) } } - col, ok := mt.collID2Meta[collID] + collMeta, ok := mt.collID2Meta[collID] if !ok { return 0, false, fmt.Errorf("collection name = %s not has meta", collName) } @@ -585,10 +860,10 @@ func (mt *MetaTable) DropIndex(collName, fieldName, indexName string) (typeutil. if err != nil { return 0, false, err } - fieldIdxInfo := make([]*model.Index, 0, len(col.FieldIndexes)) + fieldIdxInfo := make([]*pb.FieldIndexInfo, 0, len(collMeta.FieldIndexes)) var dropIdxID typeutil.UniqueID - for i, info := range col.FieldIndexes { - if info.FieldID != fieldSch.FieldID { + for i, info := range collMeta.FieldIndexes { + if info.FiledID != fieldSch.FieldID { fieldIdxInfo = append(fieldIdxInfo, info) continue } @@ -603,22 +878,29 @@ func (mt *MetaTable) DropIndex(collName, fieldName, indexName string) (typeutil. continue } dropIdxID = info.IndexID - fieldIdxInfo = append(fieldIdxInfo, col.FieldIndexes[i+1:]...) + fieldIdxInfo = append(fieldIdxInfo, collMeta.FieldIndexes[i+1:]...) 
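+		// the matched index is dropped and the untouched tail copied, so stop scanning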
 		break
 	}
-
-	if len(fieldIdxInfo) == len(col.FieldIndexes) {
+	if len(fieldIdxInfo) == len(collMeta.FieldIndexes) {
 		log.Warn("drop index,index not found", zap.String("collection name", collName), zap.String("filed name", fieldName), zap.String("index name", indexName))
 		return 0, false, nil
 	}
-
-	// update cache
-	col.FieldIndexes = fieldIdxInfo
-	mt.collID2Meta[collID] = col
+	collMeta.FieldIndexes = fieldIdxInfo
+	mt.collID2Meta[collID] = collMeta
+	k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collID, 10))
+	v, err := proto.Marshal(&collMeta)
+	if err != nil {
+		log.Error("MetaTable DropIndex Marshal collMeta fail",
+			zap.String("key", k), zap.Error(err))
+		return 0, false, fmt.Errorf("metaTable DropIndex Marshal collMeta fail key:%s, err:%w", k, err)
+	}
+	saveMeta := map[string]string{k: string(v)}
 
 	delete(mt.indexID2Meta, dropIdxID)
-	for _, part := range col.Partitions {
-		if segIDMap, ok := mt.partID2SegID[part.PartitionID]; ok {
+
+	// update segID2IndexMeta
+	for _, partID := range collMeta.PartitionIDs {
+		if segIDMap, ok := mt.partID2SegID[partID]; ok {
 			for segID := range segIDMap {
 				if segIndexInfos, ok := mt.segID2IndexMeta[segID]; ok {
 					delete(segIndexInfos, dropIdxID)
@@ -627,38 +909,37 @@ func (mt *MetaTable) DropIndex(collName, fieldName, indexName string) (typeutil.
 		}
 	}
 
-	// update metastore
-	err = mt.catalog.DropIndex(mt.ctx, &col, dropIdxID, 0)
+	delMeta := []string{
+		fmt.Sprintf("%s/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, dropIdxID),
+		fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, collMeta.ID, dropIdxID),
+	}
+
+	err = mt.txn.MultiSaveAndRemoveWithPrefix(saveMeta, delMeta)
 	if err != nil {
-		return 0, false, err
+		log.Error("TxnKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
+		panic("TxnKV MultiSaveAndRemoveWithPrefix fail")
 	}
 
 	return dropIdxID, true, nil
 }
 
 // GetSegmentIndexInfoByID return segment index info by segment id
-func (mt *MetaTable) GetSegmentIndexInfoByID(segID typeutil.UniqueID, fieldID int64, idxName string) (model.Index, error) {
+func (mt *MetaTable) GetSegmentIndexInfoByID(segID typeutil.UniqueID, fieldID int64, idxName string) (pb.SegmentIndexInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
 	segIdxMap, ok := mt.segID2IndexMeta[segID]
 	if !ok {
-		return model.Index{
-			SegmentIndexes: map[int64]model.SegmentIndex{
-				segID: {
-					Segment: model.Segment{
-						SegmentID: segID,
-					},
-					BuildID:     0,
-					EnableIndex: false,
-				},
-			},
-			FieldID: fieldID,
-			IndexID: 0,
+		return pb.SegmentIndexInfo{
+			SegmentID:   segID,
+			FieldID:     fieldID,
+			IndexID:     0,
+			BuildID:     0,
+			EnableIndex: false,
 		}, nil
 	}
 	if len(segIdxMap) == 0 {
-		return model.Index{}, fmt.Errorf("segment id %d not has any index", segID)
+		return pb.SegmentIndexInfo{}, fmt.Errorf("segment id %d not has any index", segID)
 	}
 	if fieldID == -1 && idxName == "" { // return default index
@@ -682,10 +963,10 @@ func (mt *MetaTable) GetSegmentIndexInfoByID(segID typeutil.UniqueID, fieldID in
 			}
 		}
 	}
-	return model.Index{}, fmt.Errorf("can't find index name = %s on segment = %d, with filed id = %d", idxName, segID, fieldID)
+	return pb.SegmentIndexInfo{}, fmt.Errorf("can't find index name = %s on segment = %d, with filed id = %d", idxName, segID, fieldID)
 }
 
-func (mt *MetaTable) GetSegmentIndexInfos(segID typeutil.UniqueID) (map[typeutil.UniqueID]model.Index, error) {
+func (mt *MetaTable) GetSegmentIndexInfos(segID typeutil.UniqueID) (map[typeutil.UniqueID]pb.SegmentIndexInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
@@ -698,48 +979,48 @@ func (mt *MetaTable) GetSegmentIndexInfos(segID typeutil
 }
 
 // GetFieldSchema return field schema
-func (mt *MetaTable) GetFieldSchema(collName string, fieldName string) (model.Field, error) {
+func (mt *MetaTable) GetFieldSchema(collName string, fieldName string) (schemapb.FieldSchema, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
 	return mt.unlockGetFieldSchema(collName, fieldName)
 }
 
-func (mt *MetaTable) unlockGetFieldSchema(collName string, fieldName string) (model.Field, error) {
+func (mt *MetaTable) unlockGetFieldSchema(collName string, fieldName string) (schemapb.FieldSchema, error) {
 	collID, ok := mt.collName2ID[collName]
 	if !ok {
 		collID, ok = mt.collAlias2ID[collName]
 		if !ok {
-			return model.Field{}, fmt.Errorf("collection %s not found", collName)
+			return schemapb.FieldSchema{}, fmt.Errorf("collection %s not found", collName)
 		}
 	}
-	col, ok := mt.collID2Meta[collID]
+	collMeta, ok := mt.collID2Meta[collID]
 	if !ok {
-		return model.Field{}, fmt.Errorf("collection %s not found", collName)
+		return schemapb.FieldSchema{}, fmt.Errorf("collection %s not found", collName)
 	}
 
-	for _, field := range col.Fields {
+	for _, field := range collMeta.Schema.Fields {
 		if field.Name == fieldName {
 			return *field, nil
 		}
 	}
-	return model.Field{}, fmt.Errorf("collection %s doesn't have filed %s", collName, fieldName)
+	return schemapb.FieldSchema{}, fmt.Errorf("collection %s doesn't have filed %s", collName, fieldName)
 }
 
 // IsSegmentIndexed check if segment has indexed
-func (mt *MetaTable) IsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *model.Field, indexParams []*commonpb.KeyValuePair) bool {
+func (mt *MetaTable) IsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *schemapb.FieldSchema, indexParams []*commonpb.KeyValuePair) bool {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 	return mt.unlockIsSegmentIndexed(segID, fieldSchema, indexParams)
 }
 
-func (mt *MetaTable) unlockIsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *model.Field, indexParams []*commonpb.KeyValuePair) bool {
-	index, ok := mt.segID2IndexMeta[segID]
+func (mt *MetaTable) unlockIsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *schemapb.FieldSchema, indexParams []*commonpb.KeyValuePair) bool {
+	segIdx, ok := mt.segID2IndexMeta[segID]
 	if !ok {
 		return false
 	}
-	isIndexed := false
-	for idxID, meta := range index {
+	exist := false
+	for idxID, meta := range segIdx {
 		if meta.FieldID != fieldSchema.FieldID {
 			continue
 		}
@@ -747,68 +1028,61 @@ func (mt *MetaTable) unlockIsSegmentIndexed(segID typeutil.UniqueID, fieldSchema
 		if !ok {
 			continue
 		}
-
-		segIndex, ok := meta.SegmentIndexes[segID]
-		if !ok {
-			continue
-		}
-
-		if EqualKeyPairArray(indexParams, idxMeta.IndexParams) && segIndex.EnableIndex {
-			isIndexed = true
+		if EqualKeyPairArray(indexParams, idxMeta.IndexParams) {
+			exist = true
 			break
 		}
 	}
-
-	return isIndexed
+	return exist
 }
 
-func (mt *MetaTable) unlockGetCollectionInfo(collName string) (model.Collection, error) {
+func (mt *MetaTable) unlockGetCollectionInfo(collName string) (pb.CollectionInfo, error) {
 	collID, ok := mt.collName2ID[collName]
 	if !ok {
 		collID, ok = mt.collAlias2ID[collName]
 		if !ok {
-			return model.Collection{}, fmt.Errorf("collection not found: %s", collName)
+			return pb.CollectionInfo{}, fmt.Errorf("collection not found: %s", collName)
		}
 	}
 	collMeta, ok := mt.collID2Meta[collID]
 	if !ok {
-		return model.Collection{}, fmt.Errorf("collection not found: %s", collName)
+		return pb.CollectionInfo{}, fmt.Errorf("collection not found: %s", collName)
 	}
 	return collMeta, nil
 }
 
-func (mt *MetaTable) checkFieldCanBeIndexed(collMeta model.Collection, fieldSchema model.Field, idxInfo *model.Index) error {
+func (mt *MetaTable) checkFieldCanBeIndexed(collMeta pb.CollectionInfo, fieldSchema schemapb.FieldSchema, idxInfo *pb.IndexInfo) error {
 	for _, f := range collMeta.FieldIndexes {
-		if f.FieldID == fieldSchema.FieldID {
-			if info, ok := mt.indexID2Meta[f.IndexID]; ok {
-				if idxInfo.IndexName != info.IndexName {
+		if f.GetFiledID() == fieldSchema.GetFieldID() {
+			if info, ok := mt.indexID2Meta[f.GetIndexID()]; ok {
+				if idxInfo.GetIndexName() != info.GetIndexName() {
 					return fmt.Errorf(
 						"creating multiple indexes on same field is not supported, "+
 							"collection: %s, field: %s, index name: %s, new index name: %s",
-						collMeta.Name, fieldSchema.Name,
-						info.IndexName, idxInfo.IndexName)
+						collMeta.GetSchema().GetName(), fieldSchema.GetName(),
+						info.GetIndexName(), idxInfo.GetIndexName())
 				}
 			} else {
 				// TODO: unexpected: what if index id not exist? Meta incomplete.
 				log.Warn("index meta was incomplete, index id missing in indexID2Meta",
-					zap.String("collection", collMeta.Name),
-					zap.String("field", fieldSchema.Name),
-					zap.Int64("collection id", collMeta.CollectionID),
-					zap.Int64("field id", fieldSchema.FieldID),
-					zap.Int64("index id", f.IndexID))
+					zap.String("collection", collMeta.GetSchema().GetName()),
+					zap.String("field", fieldSchema.GetName()),
+					zap.Int64("collection id", collMeta.GetID()),
+					zap.Int64("field id", fieldSchema.GetFieldID()),
+					zap.Int64("index id", f.GetIndexID()))
 			}
 		}
 	}
 	return nil
 }
 
-func (mt *MetaTable) checkFieldIndexDuplicate(collMeta model.Collection, fieldSchema model.Field, idxInfo *model.Index) (duplicate bool, err error) {
+func (mt *MetaTable) checkFieldIndexDuplicate(collMeta pb.CollectionInfo, fieldSchema schemapb.FieldSchema, idxInfo *pb.IndexInfo) (duplicate bool, err error) {
 	for _, f := range collMeta.FieldIndexes {
 		if info, ok := mt.indexID2Meta[f.IndexID]; ok {
 			if info.IndexName == idxInfo.IndexName {
 				// the index name must be different for different indexes
-				if f.FieldID != fieldSchema.FieldID || !EqualKeyPairArray(info.IndexParams, idxInfo.IndexParams) {
-					return false, fmt.Errorf("index already exists, collection: %s, field: %s, index: %s", collMeta.Name, fieldSchema.Name, idxInfo.IndexName)
+				if f.FiledID != fieldSchema.FieldID || !EqualKeyPairArray(info.IndexParams, idxInfo.IndexParams) {
+					return false, fmt.Errorf("index already exists, collection: %s, field: %s, index: %s", collMeta.GetSchema().GetName(), fieldSchema.GetName(), idxInfo.GetIndexName())
 				}
 
 				// same index name, index params, and fieldId
@@ -820,43 +1094,25 @@ func (mt *MetaTable) checkFieldIndexDuplicate(collMeta model.Collection, fieldSc
 }
 
 // GetNotIndexedSegments return segment ids which have no index
-func (mt *MetaTable) GetNotIndexedSegments(collName string, fieldName string, idxInfo *model.Index, segIDs []typeutil.UniqueID) ([]typeutil.UniqueID, model.Field, error) {
+func (mt *MetaTable) GetNotIndexedSegments(collName string, fieldName string, idxInfo *pb.IndexInfo, segIDs []typeutil.UniqueID) ([]typeutil.UniqueID, schemapb.FieldSchema, error) {
 	mt.ddLock.Lock()
 	defer mt.ddLock.Unlock()
 
+	collMeta, err := mt.unlockGetCollectionInfo(collName)
+	if err != nil {
+		// error here if collection not found.
+		return nil, schemapb.FieldSchema{}, err
+	}
+
 	fieldSchema, err := mt.unlockGetFieldSchema(collName, fieldName)
 	if err != nil {
+		// error here if field not found.
 		return nil, fieldSchema, err
 	}
 
-	rstID := make([]typeutil.UniqueID, 0, 16)
-	for _, segID := range segIDs {
-		if ok := mt.unlockIsSegmentIndexed(segID, &fieldSchema, idxInfo.IndexParams); !ok {
-			rstID = append(rstID, segID)
-		}
-	}
-	return rstID, fieldSchema, nil
-}
-
-// AddIndex add index
-func (mt *MetaTable) AddIndex(colName string, fieldName string, idxInfo *model.Index, segIDs []typeutil.UniqueID) error {
-	mt.ddLock.Lock()
-	defer mt.ddLock.Unlock()
-
-	fieldSchema, err := mt.unlockGetFieldSchema(colName, fieldName)
-	if err != nil {
-		return err
-	}
-
-	collMeta, err := mt.unlockGetCollectionInfo(colName)
-	if err != nil {
-		// error here if collection not found.
-		return err
-	}
-
 	//TODO:: check index params for sclar field
 	// set default index type for scalar index
-	if !typeutil.IsVectorType(fieldSchema.DataType) {
+	if !typeutil.IsVectorType(fieldSchema.GetDataType()) {
 		if fieldSchema.DataType == schemapb.DataType_VarChar {
 			idxInfo.IndexParams = []*commonpb.KeyValuePair{{Key: "index_type", Value: DefaultStringIndexType}}
 		} else {
@@ -865,59 +1121,65 @@ func (mt *MetaTable) GetNotIndexedSegments(collName string, fieldName string, id
 	}
 
 	if idxInfo.IndexParams == nil {
-		return fmt.Errorf("index param is nil")
+		return nil, schemapb.FieldSchema{}, fmt.Errorf("index param is nil")
 	}
 
 	if err := mt.checkFieldCanBeIndexed(collMeta, fieldSchema, idxInfo); err != nil {
-		return err
+		return nil, schemapb.FieldSchema{}, err
 	}
 
 	dupIdx, err := mt.checkFieldIndexDuplicate(collMeta, fieldSchema, idxInfo)
 	if err != nil {
 		// error here if index already exists.
-		return err
+		return nil, fieldSchema, err
 	}
 
-	if dupIdx {
-		log.Warn("due to index already exists, skip add index to metastore", zap.Int64("collectionID", collMeta.CollectionID),
-			zap.Int64("indexID", idxInfo.IndexID), zap.String("indexName", idxInfo.IndexName))
-		// skip already exist index
-		return nil
-	}
-
-	segmentIndexes := make(map[int64]model.SegmentIndex, len(segIDs))
-	for _, segID := range segIDs {
-		segmentIndex := model.SegmentIndex{
-			Segment: model.Segment{
-				SegmentID: segID,
-			},
-			EnableIndex: false,
+	// if no same index exist, save new index info to etcd
+	if !dupIdx {
+		idx := &pb.FieldIndexInfo{
+			FiledID: fieldSchema.FieldID,
+			IndexID: idxInfo.IndexID,
 		}
-		segmentIndexes[segID] = segmentIndex
+		collMeta.FieldIndexes = append(collMeta.FieldIndexes, idx)
+		k1 := path.Join(CollectionMetaPrefix, strconv.FormatInt(collMeta.ID, 10))
+		v1, err := proto.Marshal(&collMeta)
+		if err != nil {
+			log.Error("MetaTable GetNotIndexedSegments Marshal collMeta fail",
+				zap.String("key", k1), zap.Error(err))
+			return nil, schemapb.FieldSchema{}, fmt.Errorf("metaTable GetNotIndexedSegments Marshal collMeta fail key:%s, err:%w", k1, err)
+		}
+
+		k2 := fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, collMeta.ID, idx.IndexID)
+		//k2 := path.Join(IndexMetaPrefix, strconv.FormatInt(idx.IndexID, 10))
+		v2, err := proto.Marshal(idxInfo)
+		if err != nil {
+			log.Error("MetaTable GetNotIndexedSegments Marshal idxInfo fail",
+				zap.String("key", k2), zap.Error(err))
+			return nil, schemapb.FieldSchema{}, fmt.Errorf("metaTable GetNotIndexedSegments Marshal idxInfo fail key:%s, err:%w", k2, err)
+		}
+		meta := map[string]string{k1: string(v1), k2: string(v2)}
+
+		err = mt.txn.MultiSave(meta)
+		if err != nil {
+			log.Error("TxnKV MultiSave fail", zap.Error(err))
+			panic("TxnKV MultiSave fail")
+		}
+
+		mt.collID2Meta[collMeta.ID] = collMeta
+		mt.indexID2Meta[idx.IndexID] = *idxInfo
 	}
-	idxInfo.SegmentIndexes = segmentIndexes
-	idxInfo.FieldID = fieldSchema.FieldID
-	idxInfo.CollectionID = collMeta.CollectionID
-
-	idx := &model.Index{
-		FieldID:   fieldSchema.FieldID,
-		IndexID:   idxInfo.IndexID,
-		IndexName: idxInfo.IndexName,
-		Extra:     idxInfo.Extra,
+	rstID := make([]typeutil.UniqueID, 0, 16)
+	for _, segID := range segIDs {
+		if exist := mt.unlockIsSegmentIndexed(segID, &fieldSchema, idxInfo.IndexParams); !exist {
+			rstID = append(rstID, segID)
+		}
 	}
-
-	collMeta.FieldIndexes = append(collMeta.FieldIndexes, idx)
-
-	mt.catalog.CreateIndex(mt.ctx, &collMeta, idxInfo)
-
-	mt.collID2Meta[collMeta.CollectionID] = collMeta
-	mt.indexID2Meta[idxInfo.IndexID] = *idxInfo
-	return nil
+	return rstID, fieldSchema, nil
 }
 
 // GetIndexByName return index info by index name
-func (mt *MetaTable) GetIndexByName(collName, indexName string) (model.Collection, []model.Index, error) {
+func (mt *MetaTable) GetIndexByName(collName, indexName string) (pb.CollectionInfo, []pb.IndexInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
@@ -925,29 +1187,29 @@ func (mt *MetaTable) GetIndexByName(collName, indexName string) (model.Collectio
 	if !ok {
 		collID, ok = mt.collAlias2ID[collName]
 		if !ok {
-			return model.Collection{}, nil, fmt.Errorf("collection %s not found", collName)
+			return pb.CollectionInfo{}, nil, fmt.Errorf("collection %s not found", collName)
 		}
 	}
-	col, ok := mt.collID2Meta[collID]
+	collMeta, ok := mt.collID2Meta[collID]
 	if !ok {
-		return model.Collection{}, nil, fmt.Errorf("collection %s not found", collName)
+		return pb.CollectionInfo{}, nil, fmt.Errorf("collection %s not found", collName)
 	}
 
-	rstIndex := make([]model.Index, 0, len(col.FieldIndexes))
-	for _, idx := range col.FieldIndexes {
+	rstIndex := make([]pb.IndexInfo, 0, len(collMeta.FieldIndexes))
+	for _, idx := range collMeta.FieldIndexes {
 		idxInfo, ok := mt.indexID2Meta[idx.IndexID]
 		if !ok {
-			return model.Collection{}, nil, fmt.Errorf("index id = %d not found", idx.IndexID)
+			return pb.CollectionInfo{}, nil, fmt.Errorf("index id = %d not found", idx.IndexID)
 		}
 		if indexName == "" || idxInfo.IndexName == indexName {
 			rstIndex = append(rstIndex, idxInfo)
 		}
 	}
-	return col, rstIndex, nil
+	return collMeta, rstIndex, nil
 }
 
 // GetIndexByID return index info by index id
-func (mt *MetaTable) GetIndexByID(indexID typeutil.UniqueID) (*model.Index, error) {
+func (mt *MetaTable) GetIndexByID(indexID typeutil.UniqueID) (*pb.IndexInfo, error) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
@@ -959,21 +1221,21 @@ func (mt *MetaTable) GetIndexByID(indexID typeutil.UniqueID) (*pb.IndexInfo, err
 }
 
 func (mt *MetaTable) dupMeta() (
-	map[typeutil.UniqueID]model.Collection,
-	map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index,
-	map[typeutil.UniqueID]model.Index,
+	map[typeutil.UniqueID]pb.CollectionInfo,
+	map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo,
+	map[typeutil.UniqueID]pb.IndexInfo,
 ) {
 	mt.ddLock.RLock()
 	defer mt.ddLock.RUnlock()
 
-	collID2Meta := map[typeutil.UniqueID]model.Collection{}
-	segID2IndexMeta := map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index{}
-	indexID2Meta := map[typeutil.UniqueID]model.Index{}
+	collID2Meta := map[typeutil.UniqueID]pb.CollectionInfo{}
+	segID2IndexMeta := map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo{}
+	indexID2Meta := map[typeutil.UniqueID]pb.IndexInfo{}
 	for k, v := range mt.collID2Meta {
 		collID2Meta[k] = v
 	}
 	for k, v := range mt.segID2IndexMeta {
-		segID2IndexMeta[k] = map[typeutil.UniqueID]model.Index{}
+		segID2IndexMeta[k] = map[typeutil.UniqueID]pb.SegmentIndexInfo{}
 		for k2, v2 := range v {
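+			// copy entry-by-entry so the returned snapshot is independent of the live map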
 			segID2IndexMeta[k][k2] = v2
 		}
@@ -1002,24 +1264,41 @@ func (mt *MetaTable) AddAlias(collectionAlias string, collectionName string, ts
 	}
 	mt.collAlias2ID[collectionAlias] = id
 
-	coll := &model.Collection{
-		CollectionID: id,
-		Aliases:      []string{collectionAlias},
+	k := fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collectionAlias)
+	v, err := proto.Marshal(&pb.CollectionInfo{ID: id, Schema: &schemapb.CollectionSchema{Name: collectionAlias}})
+	if err != nil {
+		log.Error("MetaTable AddAlias Marshal CollectionInfo fail",
+			zap.String("key", k), zap.Error(err))
+		return fmt.Errorf("metaTable AddAlias Marshal CollectionInfo fail key:%s, err:%w", k, err)
 	}
-	return mt.catalog.CreateAlias(mt.ctx, coll, ts)
+
+	err = mt.snapshot.Save(k, string(v), ts)
+	if err != nil {
+		log.Error("SnapShotKV Save fail", zap.Error(err))
+		panic("SnapShotKV Save fail")
+	}
+	return nil
 }
 
 // DropAlias drop collection alias
 func (mt *MetaTable) DropAlias(collectionAlias string, ts typeutil.Timestamp) error {
 	mt.ddLock.Lock()
 	defer mt.ddLock.Unlock()
-	collectionID, ok := mt.collAlias2ID[collectionAlias]
-	if !ok {
+	if _, ok := mt.collAlias2ID[collectionAlias]; !ok {
 		return fmt.Errorf("alias does not exist, alias = %s", collectionAlias)
 	}
 	delete(mt.collAlias2ID, collectionAlias)
-	return mt.catalog.DropAlias(mt.ctx, collectionID, collectionAlias, ts)
+
+	delMetakeys := []string{
+		fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collectionAlias),
+	}
+	meta := make(map[string]string)
+	err := mt.snapshot.MultiSaveAndRemoveWithPrefix(meta, delMetakeys, ts)
+	if err != nil {
+		log.Error("SnapShotKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
+		panic("SnapShotKV MultiSaveAndRemoveWithPrefix fail")
+	}
+	return nil
 }
 
 // AlterAlias alter collection alias
@@ -1036,11 +1315,20 @@ func (mt *MetaTable) AlterAlias(collectionAlias string, collectionName string, t
 	}
 	mt.collAlias2ID[collectionAlias] = id
 
-	coll := &model.Collection{
-		CollectionID: id,
-		Aliases:      []string{collectionAlias},
+	k := fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collectionAlias)
+	v, err := proto.Marshal(&pb.CollectionInfo{ID: id, Schema: &schemapb.CollectionSchema{Name: collectionAlias}})
+	if err != nil {
+		log.Error("MetaTable AlterAlias Marshal CollectionInfo fail",
+			zap.String("key", k), zap.Error(err))
+		return fmt.Errorf("metaTable AlterAlias Marshal CollectionInfo fail key:%s, err:%w", k, err)
 	}
-	return mt.catalog.AlterAlias(mt.ctx, coll, ts)
+
+	err = mt.snapshot.Save(k, string(v), ts)
+	if err != nil {
+		log.Error("SnapShotKV Save fail", zap.Error(err))
+		panic("SnapShotKV Save fail")
+	}
+	return nil
 }
 
 // IsAlias returns true if specific `collectionAlias` is an alias of collection.
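
The alias records written by AddAlias and AlterAlias above are deliberately skinny pb.CollectionInfo values: only ID (the target collection) and Schema.Name (the alias string) are populated under CollectionAliasMetaPrefix. A short sketch of decoding one such value read back from the snapshot KV, mirroring the alias loop in reloadFromKV (val is a hypothetical string loaded from etcd):

	// hypothetical decode of a stored alias record
	aliasInfo := pb.CollectionInfo{}
	if err := proto.Unmarshal([]byte(val), &aliasInfo); err != nil {
		// corrupt alias meta; reloadFromKV surfaces this as an error
	}
	alias, targetID := aliasInfo.Schema.Name, aliasInfo.ID
	_, _ = alias, targetID
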
@@ -1053,33 +1341,80 @@ func (mt *MetaTable) IsAlias(collectionAlias string) bool { // AddCredential add credential func (mt *MetaTable) AddCredential(credInfo *internalpb.CredentialInfo) error { + mt.credLock.Lock() + defer mt.credLock.Unlock() + if credInfo.Username == "" { return fmt.Errorf("username is empty") } - - credential := &model.Credential{ - Username: credInfo.Username, - EncryptedPassword: credInfo.EncryptedPassword, + k := fmt.Sprintf("%s/%s", CredentialPrefix, credInfo.Username) + v, err := json.Marshal(&internalpb.CredentialInfo{EncryptedPassword: credInfo.EncryptedPassword}) + if err != nil { + log.Error("MetaTable marshal credential info fail", zap.String("key", k), zap.Error(err)) + return fmt.Errorf("metaTable marshal credential info fail key:%s, err:%w", k, err) } - return mt.catalog.CreateCredential(mt.ctx, credential) + err = mt.txn.Save(k, string(v)) + if err != nil { + log.Error("MetaTable save fail", zap.Error(err)) + return fmt.Errorf("save credential fail key:%s, err:%w", credInfo.Username, err) + } + return nil } // GetCredential get credential by username func (mt *MetaTable) getCredential(username string) (*internalpb.CredentialInfo, error) { - credential, err := mt.catalog.GetCredential(mt.ctx, username) - return model.ConvertToCredentialPB(credential), err + mt.credLock.RLock() + defer mt.credLock.RUnlock() + + k := fmt.Sprintf("%s/%s", CredentialPrefix, username) + v, err := mt.txn.Load(k) + if err != nil { + log.Warn("MetaTable load fail", zap.String("key", k), zap.Error(err)) + return nil, err + } + + credentialInfo := internalpb.CredentialInfo{} + err = json.Unmarshal([]byte(v), &credentialInfo) + if err != nil { + return nil, fmt.Errorf("get credential unmarshal err:%w", err) + } + return &internalpb.CredentialInfo{Username: username, EncryptedPassword: credentialInfo.EncryptedPassword}, nil } // DeleteCredential delete credential func (mt *MetaTable) DeleteCredential(username string) error { - return mt.catalog.DropCredential(mt.ctx, username) + mt.credLock.Lock() + defer mt.credLock.Unlock() + + k := fmt.Sprintf("%s/%s", CredentialPrefix, username) + + err := mt.txn.Remove(k) + if err != nil { + log.Error("MetaTable remove fail", zap.Error(err)) + return fmt.Errorf("remove credential fail key:%s, err:%w", username, err) + } + return nil } // ListCredentialUsernames list credential usernames func (mt *MetaTable) ListCredentialUsernames() (*milvuspb.ListCredUsersResponse, error) { - usernames, err := mt.catalog.ListCredentials(mt.ctx) + mt.credLock.RLock() + defer mt.credLock.RUnlock() + + keys, _, err := mt.txn.LoadWithPrefix(CredentialPrefix) if err != nil { - return nil, fmt.Errorf("list credential usernames err:%w", err) + log.Error("MetaTable list all credential usernames fail", zap.Error(err)) + return &milvuspb.ListCredUsersResponse{}, err + } + + var usernames []string + for _, path := range keys { + username := typeutil.After(path, UserSubPrefix+"/") + if len(username) == 0 { + log.Warn("no username extract from path:", zap.String("path", path)) + continue + } + usernames = append(usernames, username) } return &milvuspb.ListCredUsersResponse{Usernames: usernames}, nil } diff --git a/internal/rootcoord/meta_table_test.go b/internal/rootcoord/meta_table_test.go index 97bca93b37..c2d6ce4fd1 100644 --- a/internal/rootcoord/meta_table_test.go +++ b/internal/rootcoord/meta_table_test.go @@ -17,7 +17,6 @@ package rootcoord import ( - "context" "errors" "fmt" "math/rand" @@ -26,15 +25,14 @@ import ( "testing" "time" + 
"github.com/milvus-io/milvus/internal/proto/internalpb" + "github.com/golang/protobuf/proto" "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" memkv "github.com/milvus-io/milvus/internal/kv/mem" - kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv" - "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/proto/commonpb" pb "github.com/milvus-io/milvus/internal/proto/etcdpb" - "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/schemapb" "github.com/milvus-io/milvus/internal/util/etcd" "github.com/milvus-io/milvus/internal/util/typeutil" @@ -116,48 +114,65 @@ func Test_MockKV(t *testing.T) { return nil, nil, fmt.Errorf("load prefix error") } - _, err := NewMetaTable(context.TODO(), kt, k1) + _, err := NewMetaTable(kt, k1) assert.NotNil(t, err) assert.EqualError(t, err, "load prefix error") - // collection - prefix[kvmetestore.CollectionMetaPrefix] = []string{"collection-meta"} - _, err = NewMetaTable(context.TODO(), kt, k1) + // proxy + prefix[ProxyMetaPrefix] = []string{"porxy-meta"} + _, err = NewMetaTable(kt, k1) assert.NotNil(t, err) - value, err := proto.Marshal(&pb.CollectionInfo{Schema: &schemapb.CollectionSchema{}}) + value, err := proto.Marshal(&pb.ProxyMeta{}) assert.Nil(t, err) - prefix[kvmetestore.CollectionMetaPrefix] = []string{string(value)} - _, err = NewMetaTable(context.TODO(), kt, k1) + prefix[ProxyMetaPrefix] = []string{string(value)} + _, err = NewMetaTable(kt, k1) + assert.NotNil(t, err) + + // collection + prefix[CollectionMetaPrefix] = []string{"collection-meta"} + _, err = NewMetaTable(kt, k1) + assert.NotNil(t, err) + + value, err = proto.Marshal(&pb.CollectionInfo{Schema: &schemapb.CollectionSchema{}}) + assert.Nil(t, err) + prefix[CollectionMetaPrefix] = []string{string(value)} + _, err = NewMetaTable(kt, k1) assert.NotNil(t, err) // segment index - prefix[kvmetestore.SegmentIndexMetaPrefix] = []string{"segment-index-meta"} - _, err = NewMetaTable(context.TODO(), kt, k1) + prefix[SegmentIndexMetaPrefix] = []string{"segment-index-meta"} + _, err = NewMetaTable(kt, k1) assert.NotNil(t, err) value, err = proto.Marshal(&pb.SegmentIndexInfo{}) assert.Nil(t, err) - prefix[kvmetestore.SegmentIndexMetaPrefix] = []string{string(value)} - _, err = NewMetaTable(context.TODO(), kt, k1) + prefix[SegmentIndexMetaPrefix] = []string{string(value)} + _, err = NewMetaTable(kt, k1) assert.NotNil(t, err) - prefix[kvmetestore.SegmentIndexMetaPrefix] = []string{string(value), string(value)} - _, err = NewMetaTable(context.TODO(), kt, k1) + prefix[SegmentIndexMetaPrefix] = []string{string(value), string(value)} + _, err = NewMetaTable(kt, k1) assert.NotNil(t, err) assert.EqualError(t, err, "load prefix error") // index - prefix[kvmetestore.IndexMetaPrefix] = []string{"index-meta"} - _, err = NewMetaTable(context.TODO(), kt, k1) + prefix[IndexMetaPrefix] = []string{"index-meta"} + _, err = NewMetaTable(kt, k1) assert.NotNil(t, err) value, err = proto.Marshal(&pb.IndexInfo{}) assert.Nil(t, err) - prefix[kvmetestore.IndexMetaPrefix] = []string{string(value)} - _, err = NewMetaTable(context.TODO(), kt, k1) + prefix[IndexMetaPrefix] = []string{string(value)} + m1, err := NewMetaTable(kt, k1) assert.NotNil(t, err) assert.EqualError(t, err, "load prefix error") + prefix[CollectionAliasMetaPrefix] = []string{"alias-meta"} + + k1.save = func(key string, value string, ts typeutil.Timestamp) error { + return fmt.Errorf("save proxy error") + } + 
assert.Panics(t, func() { m1.AddProxy(&pb.ProxyMeta{}) }) } func TestMetaTable(t *testing.T) { @@ -197,63 +212,60 @@ func TestMetaTable(t *testing.T) { require.Nil(t, err) defer etcdCli.Close() - skv, err := kvmetestore.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) + skv, err := newMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) assert.Nil(t, err) assert.NotNil(t, skv) txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath) - mt, err := NewMetaTable(context.TODO(), txnKV, skv) + mt, err := NewMetaTable(txnKV, skv) assert.Nil(t, err) - collInfo := &model.Collection{ - CollectionID: collID, - Name: collName, - AutoID: false, - Fields: []*model.Field{ - { - FieldID: fieldID, - Name: "field110", - IsPrimaryKey: false, - Description: "", - DataType: schemapb.DataType_FloatVector, - TypeParams: []*commonpb.KeyValuePair{ - { - Key: "field110-k1", - Value: "field110-v1", + collInfo := &pb.CollectionInfo{ + ID: collID, + Schema: &schemapb.CollectionSchema{ + Name: collName, + AutoID: false, + Fields: []*schemapb.FieldSchema{ + { + FieldID: fieldID, + Name: "field110", + IsPrimaryKey: false, + Description: "", + DataType: schemapb.DataType_FloatVector, + TypeParams: []*commonpb.KeyValuePair{ + { + Key: "field110-k1", + Value: "field110-v1", + }, + { + Key: "field110-k2", + Value: "field110-v2", + }, }, - { - Key: "field110-k2", - Value: "field110-v2", - }, - }, - IndexParams: []*commonpb.KeyValuePair{ - { - Key: "field110-i1", - Value: "field110-v1", - }, - { - Key: "field110-i2", - Value: "field110-v2", + IndexParams: []*commonpb.KeyValuePair{ + { + Key: "field110-i1", + Value: "field110-v1", + }, + { + Key: "field110-i2", + Value: "field110-v2", + }, }, }, }, }, - FieldIndexes: []*model.Index{ + FieldIndexes: []*pb.FieldIndexInfo{ { - FieldID: fieldID, + FiledID: fieldID, IndexID: indexID, }, }, - CreateTime: 0, - Partitions: []*model.Partition{ - { - PartitionID: partIDDefault, - PartitionName: Params.CommonCfg.DefaultPartitionName, - PartitionCreatedTimestamp: 0, - }, - }, + CreateTime: 0, + PartitionIDs: []typeutil.UniqueID{partIDDefault}, + PartitionNames: []string{Params.CommonCfg.DefaultPartitionName}, + PartitionCreatedTimestamps: []uint64{0}, } - - idxInfo := []*model.Index{ + idxInfo := []*pb.IndexInfo{ { IndexName: indexName, IndexID: indexID, @@ -270,30 +282,30 @@ func TestMetaTable(t *testing.T) { }, } - mt.indexID2Meta[indexID] = *idxInfo[0] - var wg sync.WaitGroup wg.Add(1) t.Run("add collection", func(t *testing.T) { defer wg.Done() ts := ftso() + err = mt.AddCollection(collInfo, ts, nil, "") + assert.NotNil(t, err) - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) assert.Equal(t, uint64(1), ts) - collMeta, err := mt.GetCollectionByName(collName, ts) + collMeta, err := mt.GetCollectionByName(collName, 0) assert.Nil(t, err) assert.Equal(t, collMeta.CreateTime, ts) - assert.Equal(t, collMeta.Partitions[0].PartitionCreatedTimestamp, ts) + assert.Equal(t, collMeta.PartitionCreatedTimestamps[0], ts) - assert.Equal(t, partIDDefault, collMeta.Partitions[0].PartitionID) - assert.Equal(t, 1, len(collMeta.Partitions)) - assert.True(t, mt.HasCollection(collInfo.CollectionID, 0)) + assert.Equal(t, partIDDefault, collMeta.PartitionIDs[0]) + assert.Equal(t, 1, len(collMeta.PartitionIDs)) + assert.True(t, mt.HasCollection(collInfo.ID, 0)) field, err := mt.GetFieldSchema(collName, "field110") assert.Nil(t, err) - assert.Equal(t, collInfo.Fields[0].FieldID, field.FieldID) + assert.Equal(t, collInfo.Schema.Fields[0].FieldID, 
field.FieldID) // check DD operation flag flag, err := mt.snapshot.Load(DDMsgSendPrefix, 0) @@ -342,9 +354,9 @@ func TestMetaTable(t *testing.T) { collMeta, ok := mt.collID2Meta[collID] assert.True(t, ok) - assert.Equal(t, 2, len(collMeta.Partitions)) - assert.Equal(t, collMeta.Partitions[1].PartitionName, partName) - assert.Equal(t, ts, collMeta.Partitions[1].PartitionCreatedTimestamp) + assert.Equal(t, 2, len(collMeta.PartitionNames)) + assert.Equal(t, collMeta.PartitionNames[1], partName) + assert.Equal(t, ts, collMeta.PartitionCreatedTimestamps[1]) // check DD operation flag flag, err := mt.txn.Load(DDMsgSendPrefix) @@ -355,33 +367,25 @@ func TestMetaTable(t *testing.T) { wg.Add(1) t.Run("add segment index", func(t *testing.T) { defer wg.Done() - index := model.Index{ + segIdxInfo := pb.SegmentIndexInfo{ CollectionID: collID, + PartitionID: partID, + SegmentID: segID, FieldID: fieldID, IndexID: indexID, - SegmentIndexes: map[int64]model.SegmentIndex{ - segID: { - Segment: model.Segment{ - SegmentID: segID, - PartitionID: partID, - }, - BuildID: buildID, - EnableIndex: true, - }, - }, + BuildID: buildID, } - err = mt.AlterIndex(&index) + err = mt.AddIndex(&segIdxInfo) assert.Nil(t, err) // it's legal to add index twice - err = mt.AlterIndex(&index) + err = mt.AddIndex(&segIdxInfo) assert.Nil(t, err) - idxMeta, ok := mt.segID2IndexMeta[segID] - assert.True(t, ok) - segMeta, ok := idxMeta[indexID] - assert.True(t, ok) - assert.True(t, segMeta.SegmentIndexes[segID].EnableIndex) + segIdxInfo.BuildID = 202 + err = mt.AddIndex(&segIdxInfo) + assert.NotNil(t, err) + assert.EqualError(t, err, fmt.Sprintf("index id = %d exist", segIdxInfo.IndexID)) }) wg.Add(1) @@ -397,13 +401,13 @@ func TestMetaTable(t *testing.T) { Value: "field110-v2", }, } - idxInfo := &model.Index{ + idxInfo := &pb.IndexInfo{ IndexName: "testColl_index_110", IndexID: indexID, IndexParams: params, } - err := mt.AddIndex("collTest", "field110", idxInfo, []typeutil.UniqueID{segID}) + _, _, err := mt.GetNotIndexedSegments("collTest", "field110", idxInfo, nil) assert.NotNil(t, err) }) @@ -421,17 +425,7 @@ func TestMetaTable(t *testing.T) { }, } - tparams := []*commonpb.KeyValuePair{ - { - Key: "field110-k1", - Value: "field110-v1", - }, - { - Key: "field110-k2", - Value: "field110-v2", - }, - } - idxInfo := &model.Index{ + idxInfo := &pb.IndexInfo{ IndexName: "field110", IndexID: 2000, IndexParams: params, @@ -439,11 +433,8 @@ func TestMetaTable(t *testing.T) { _, _, err := mt.GetNotIndexedSegments("collTest", "field110", idxInfo, nil) assert.NotNil(t, err) - seg, field, err := mt.GetNotIndexedSegments(collName, "field110", idxInfo, []typeutil.UniqueID{segID, segID2}) - assert.Nil(t, err) - assert.Equal(t, 1, len(seg)) - assert.Equal(t, segID2, seg[0]) - assert.True(t, EqualKeyPairArray(field.TypeParams, tparams)) + _, _, err = mt.GetNotIndexedSegments(collName, "field110", idxInfo, []typeutil.UniqueID{segID, segID2}) + assert.NotNil(t, err) params = []*commonpb.KeyValuePair{ { @@ -455,12 +446,8 @@ func TestMetaTable(t *testing.T) { idxInfo.IndexID = 2001 idxInfo.IndexName = "field110-1" - seg, field, err = mt.GetNotIndexedSegments(collName, "field110", idxInfo, []typeutil.UniqueID{segID, segID2}) - assert.Nil(t, err) - assert.Equal(t, 2, len(seg)) - assert.Equal(t, segID, seg[0]) - assert.Equal(t, segID2, seg[1]) - assert.True(t, EqualKeyPairArray(field.TypeParams, tparams)) + _, _, err = mt.GetNotIndexedSegments(collName, "field110", idxInfo, []typeutil.UniqueID{segID, segID2}) + assert.NotNil(t, err) }) wg.Add(1) @@ -487,6 
+474,19 @@ func TestMetaTable(t *testing.T) { assert.Zero(t, len(idx)) }) + wg.Add(1) + t.Run("reload meta", func(t *testing.T) { + defer wg.Done() + po := pb.ProxyMeta{ + ID: 101, + } + err = mt.AddProxy(&po) + assert.Nil(t, err) + + _, err = NewMetaTable(txnKV, skv) + assert.Nil(t, err) + }) + wg.Add(1) t.Run("drop index", func(t *testing.T) { defer wg.Done() @@ -503,6 +503,10 @@ func TestMetaTable(t *testing.T) { assert.Nil(t, err) assert.Zero(t, len(idxs)) + _, idxs, err = mt.GetIndexByName(collName, "field110-1") + assert.Nil(t, err) + assert.Zero(t, len(idxs)) + _, err = mt.GetSegmentIndexInfoByID(segID, -1, "") assert.NotNil(t, err) }) @@ -555,11 +559,8 @@ func TestMetaTable(t *testing.T) { /////////////////////////// these tests should run at last, it only used to hit the error lines //////////////////////// txnkv := etcdkv.NewEtcdKV(etcdCli, rootPath) - mockKV := &mockTestKV{ - loadWithPrefix: func(key string, ts typeutil.Timestamp) ([]string, []string, error) { - return nil, nil, nil - }, - } + mockKV := &mockTestKV{} + mt.snapshot = mockKV mockTxnKV := &mockTestTxnKV{ TxnKV: mt.txn, loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) }, @@ -570,9 +571,7 @@ func TestMetaTable(t *testing.T) { }, remove: func(key string) error { return txnkv.Remove(key) }, } - - mt, err = NewMetaTable(context.TODO(), mockTxnKV, mockKV) - assert.Nil(t, err) + mt.txn = mockTxnKV wg.Add(1) t.Run("add collection failed", func(t *testing.T) { @@ -583,8 +582,10 @@ func TestMetaTable(t *testing.T) { mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { return fmt.Errorf("multi save error") } - collInfo.Partitions = []*model.Partition{} - assert.Error(t, mt.AddCollection(collInfo, 0, "")) + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil + assert.Panics(t, func() { mt.AddCollection(collInfo, 0, idxInfo, "") }) }) wg.Add(1) @@ -597,7 +598,7 @@ func TestMetaTable(t *testing.T) { return fmt.Errorf("multi save and remove with prefix error") } ts := ftso() - assert.Error(t, mt.DeleteCollection(collInfo.CollectionID, ts, "")) + assert.Panics(t, func() { mt.DeleteCollection(collInfo.ID, ts, "") }) }) wg.Add(1) @@ -607,15 +608,17 @@ func TestMetaTable(t *testing.T) { return nil } + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - collInfo.Partitions = []*model.Partition{} - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - mt.collID2Meta = make(map[int64]model.Collection) - _, err = mt.GetCollectionByName(collInfo.Name, 0) + mt.collID2Meta = make(map[int64]pb.CollectionInfo) + _, err = mt.GetCollectionByName(collInfo.Schema.Name, 0) assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("can't find collection %s with id %d", collInfo.Name, collInfo.CollectionID)) + assert.EqualError(t, err, fmt.Sprintf("can't find collection %s with id %d", collInfo.Schema.Name, collInfo.ID)) }) @@ -631,9 +634,11 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - collInfo.Partitions = []*model.Partition{} - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) ts = ftso() @@ -641,34 +646,41 @@ func TestMetaTable(t *testing.T) { assert.NotNil(t, err) 
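
The subtests around this point all lean on one failure-injection pattern: `mt.txn` and `mt.snapshot` are swapped for mocks whose methods are plain function fields, and a forced save error is now expected to panic rather than return (`assert.Panics` on `AddCollection`, `DeleteCollection`, and friends). A minimal, self-contained sketch of that pattern — the `TxnKV` shape and `persist` helper here are assumptions for illustration, not the real Milvus types:

```go
package main

import (
	"errors"
	"fmt"
)

// TxnKV is a stand-in for the kv client interface assumed by this sketch.
type TxnKV interface {
	Save(key, value string) error
}

// mockTxnKV exposes its behavior as a function field, so each subtest can
// inject its own error without redefining the whole mock.
type mockTxnKV struct {
	save func(key, value string) error
}

func (m *mockTxnKV) Save(key, value string) error { return m.save(key, value) }

// persist mirrors the tested behavior: a metadata write that cannot reach
// the store is treated as fatal and panics.
func persist(kv TxnKV, key, value string) {
	if err := kv.Save(key, value); err != nil {
		panic(fmt.Sprintf("persist %s: %v", key, err))
	}
}

func main() {
	kv := &mockTxnKV{save: func(string, string) error { return nil }}
	persist(kv, "collection/1", "meta") // happy path: no output

	kv.save = func(string, string) error { return errors.New("save error") }
	defer func() { fmt.Println("recovered:", recover()) }()
	persist(kv, "collection/1", "meta") // panics, as assert.Panics expects
}
```

In the real tests the same trick is applied per method (`save`, `multiSave`, `multiSaveAndRemoveWithPrefix`), which is why a single mock type can drive so many distinct failure cases.
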
assert.EqualError(t, err, "can't find collection. id = 2") - coll := mt.collID2Meta[collInfo.CollectionID] - coll.Partitions = make([]*model.Partition, Params.RootCoordCfg.MaxPartitionNum) - mt.collID2Meta[coll.CollectionID] = coll - err = mt.AddPartition(coll.CollectionID, "no-part", 22, ts, "") + coll := mt.collID2Meta[collInfo.ID] + coll.PartitionIDs = make([]int64, Params.RootCoordCfg.MaxPartitionNum) + mt.collID2Meta[coll.ID] = coll + err = mt.AddPartition(coll.ID, "no-part", 22, ts, "") assert.NotNil(t, err) assert.EqualError(t, err, fmt.Sprintf("maximum partition's number should be limit to %d", Params.RootCoordCfg.MaxPartitionNum)) - coll.Partitions = []*model.Partition{{PartitionID: partID, PartitionName: partName, PartitionCreatedTimestamp: ftso()}} - mt.collID2Meta[coll.CollectionID] = coll - + coll.PartitionIDs = []int64{partID} + coll.PartitionNames = []string{partName} + coll.PartitionCreatedTimestamps = []uint64{ftso()} + mt.collID2Meta[coll.ID] = coll mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { return fmt.Errorf("multi save error") } - assert.Error(t, mt.AddPartition(coll.CollectionID, "no-part", 22, ts, "")) - //err = mt.AddPartition(coll.CollectionID, "no-part", 22, ts, nil) + assert.Panics(t, func() { mt.AddPartition(coll.ID, "no-part", 22, ts, "") }) + //err = mt.AddPartition(coll.ID, "no-part", 22, ts, nil) //assert.NotNil(t, err) //assert.EqualError(t, err, "multi save error") mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { return nil } + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil - collInfo.Partitions = []*model.Partition{} + //_, err = mt.AddCollection(collInfo, idxInfo, nil) + //assert.Nil(t, err) + //_, err = mt.AddPartition(coll.ID, partName, partID, nil) + //assert.Nil(t, err) ts = ftso() - err = mt.AddPartition(coll.CollectionID, partName, 22, ts, "") + err = mt.AddPartition(coll.ID, partName, 22, ts, "") assert.NotNil(t, err) assert.EqualError(t, err, fmt.Sprintf("partition name = %s already exists", partName)) - err = mt.AddPartition(coll.CollectionID, "no-part", partID, ts, "") + err = mt.AddPartition(coll.ID, "no-part", partID, ts, "") assert.NotNil(t, err) assert.EqualError(t, err, fmt.Sprintf("partition id = %d already exists", partID)) }) @@ -685,15 +697,17 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - assert.False(t, mt.HasPartition(collInfo.CollectionID, "no-partName", 0)) + assert.False(t, mt.HasPartition(collInfo.ID, "no-partName", 0)) - mt.collID2Meta = make(map[int64]model.Collection) - assert.False(t, mt.HasPartition(collInfo.CollectionID, partName, 0)) + mt.collID2Meta = make(map[int64]pb.CollectionInfo) + assert.False(t, mt.HasPartition(collInfo.ID, partName, 0)) }) wg.Add(1) @@ -708,28 +722,32 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{{PartitionID: partID, PartitionName: partName, PartitionCreatedTimestamp: ftso()}} + collInfo.PartitionIDs = []int64{partID} + collInfo.PartitionNames = []string{partName} + collInfo.PartitionCreatedTimestamps = []uint64{ftso()} ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = 
mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) ts = ftso() - _, err = mt.DeletePartition(collInfo.CollectionID, Params.CommonCfg.DefaultPartitionName, ts, "") + _, err = mt.DeletePartition(collInfo.ID, Params.CommonCfg.DefaultPartitionName, ts, "") assert.NotNil(t, err) assert.EqualError(t, err, "default partition cannot be deleted") - _, err = mt.DeletePartition(collInfo.CollectionID, "abc", ts, "") + _, err = mt.DeletePartition(collInfo.ID, "abc", ts, "") assert.NotNil(t, err) assert.EqualError(t, err, "partition abc does not exist") mockKV.save = func(key, value string, ts typeutil.Timestamp) error { return errors.New("mocked error") } - _, err = mt.DeletePartition(collInfo.CollectionID, partName, ts, "") - assert.Error(t, err) + assert.Panics(t, func() { mt.DeletePartition(collInfo.ID, partName, ts, "") }) + //_, err = mt.DeletePartition(collInfo.ID, partName, ts, nil) + //assert.NotNil(t, err) + //assert.EqualError(t, err, "multi save and remove with prefix error") - mt.collID2Meta = make(map[int64]model.Collection) - _, err = mt.DeletePartition(collInfo.CollectionID, "abc", ts, "") + mt.collID2Meta = make(map[int64]pb.CollectionInfo) + _, err = mt.DeletePartition(collInfo.ID, "abc", ts, "") assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("can't find collection id = %d", collInfo.CollectionID)) + assert.EqualError(t, err, fmt.Sprintf("can't find collection id = %d", collInfo.ID)) }) wg.Add(1) @@ -747,48 +765,45 @@ func TestMetaTable(t *testing.T) { err = mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - segIdxInfo := model.Index{ + segIdxInfo := pb.SegmentIndexInfo{ CollectionID: collID, + PartitionID: partID, + SegmentID: segID, FieldID: fieldID, IndexID: indexID2, - SegmentIndexes: map[int64]model.SegmentIndex{ - segID: { - Segment: model.Segment{ - SegmentID: segID, - PartitionID: partID, - }, - BuildID: buildID, - }, - }, + BuildID: buildID, } - err = mt.AlterIndex(&segIdxInfo) + err = mt.AddIndex(&segIdxInfo) assert.NotNil(t, err) assert.EqualError(t, err, fmt.Sprintf("index id = %d not found", segIdxInfo.IndexID)) - mt.collID2Meta = make(map[int64]model.Collection) - err = mt.AlterIndex(&segIdxInfo) + mt.collID2Meta = make(map[int64]pb.CollectionInfo) + err = mt.AddIndex(&segIdxInfo) assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection id = %d not found", collInfo.CollectionID)) + assert.EqualError(t, err, fmt.Sprintf("collection id = %d not found", collInfo.ID)) err = mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts = ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) segIdxInfo.IndexID = indexID - mt.indexID2Meta[indexID] = segIdxInfo - mockTxnKV.multiSave = func(kvs map[string]string) error { + mockTxnKV.save = func(key string, value string) error { return fmt.Errorf("save error") } - assert.Error(t, mt.AlterIndex(&segIdxInfo)) + assert.Panics(t, func() { mt.AddIndex(&segIdxInfo) }) }) wg.Add(1) @@ -806,11 +821,12 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + 
collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - mt.indexID2Meta[indexID] = *idxInfo[0] _, _, err = mt.DropIndex("abc", "abc", "abc") assert.NotNil(t, err) @@ -821,42 +837,40 @@ func TestMetaTable(t *testing.T) { assert.NotNil(t, err) assert.EqualError(t, err, "collection name = abc not has meta") - _, _, err = mt.DropIndex(collInfo.Name, "abc", "abc") + _, _, err = mt.DropIndex(collInfo.Schema.Name, "abc", "abc") assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection %s doesn't have filed abc", collInfo.Name)) + assert.EqualError(t, err, fmt.Sprintf("collection %s doesn't have filed abc", collInfo.Schema.Name)) - coll := mt.collID2Meta[collInfo.CollectionID] - coll.FieldIndexes = []*model.Index{ + coll := mt.collID2Meta[collInfo.ID] + coll.FieldIndexes = []*pb.FieldIndexInfo{ { - FieldID: fieldID2, + FiledID: fieldID2, IndexID: indexID2, }, { - FieldID: fieldID, + FiledID: fieldID, IndexID: indexID, }, } - mt.collID2Meta[coll.CollectionID] = coll - mt.indexID2Meta = make(map[int64]model.Index) - idxID, isDroped, err := mt.DropIndex(collInfo.Name, collInfo.Fields[0].Name, idxInfo[0].IndexName) + mt.collID2Meta[coll.ID] = coll + mt.indexID2Meta = make(map[int64]pb.IndexInfo) + idxID, isDroped, err := mt.DropIndex(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0].IndexName) assert.Zero(t, idxID) assert.False(t, isDroped) assert.Nil(t, err) err = mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + coll.PartitionCreatedTimestamps = nil ts = ftso() - err = mt.AddCollection(collInfo, ts, "") - mt.indexID2Meta[indexID] = *idxInfo[0] - + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) mockTxnKV.multiSaveAndRemoveWithPrefix = func(saves map[string]string, removals []string) error { return fmt.Errorf("multi save and remove with prefix error") } - - _, _, err = mt.DropIndex(collInfo.Name, collInfo.Fields[0].Name, idxInfo[0].IndexName) - assert.Error(t, err) + assert.Panics(t, func() { mt.DropIndex(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0].IndexName) }) }) wg.Add(1) @@ -874,45 +888,40 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - mt.indexID2Meta[indexID] = *idxInfo[0] - index, err := mt.GetSegmentIndexInfoByID(segID2, fieldID, "abc") + seg, err := mt.GetSegmentIndexInfoByID(segID2, fieldID, "abc") assert.Nil(t, err) - assert.Equal(t, segID2, index.SegmentIndexes[segID2].Segment.SegmentID) - assert.Equal(t, fieldID, index.FieldID) - assert.Equal(t, false, index.SegmentIndexes[segID2].EnableIndex) + assert.Equal(t, segID2, seg.SegmentID) + assert.Equal(t, fieldID, seg.FieldID) + assert.Equal(t, false, seg.EnableIndex) - segIdxInfo := model.Index{ + segIdxInfo := pb.SegmentIndexInfo{ CollectionID: collID, - SegmentIndexes: map[int64]model.SegmentIndex{ - segID: { - Segment: model.Segment{ - SegmentID: segID, - PartitionID: partID, - }, - BuildID: buildID, - }, - }, - FieldID: fieldID, - IndexID: indexID, + PartitionID: partID, + SegmentID: 
segID, + FieldID: fieldID, + IndexID: indexID, + BuildID: buildID, } - err = mt.AlterIndex(&segIdxInfo) + err = mt.AddIndex(&segIdxInfo) assert.Nil(t, err) - idx, err := mt.GetSegmentIndexInfoByID(segID, segIdxInfo.FieldID, idxInfo[0].IndexName) + idx, err := mt.GetSegmentIndexInfoByID(segIdxInfo.SegmentID, segIdxInfo.FieldID, idxInfo[0].IndexName) assert.Nil(t, err) assert.Equal(t, segIdxInfo.IndexID, idx.IndexID) - _, err = mt.GetSegmentIndexInfoByID(segID, segIdxInfo.FieldID, "abc") + _, err = mt.GetSegmentIndexInfoByID(segIdxInfo.SegmentID, segIdxInfo.FieldID, "abc") assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("can't find index name = abc on segment = %d, with filed id = %d", segID, segIdxInfo.FieldID)) + assert.EqualError(t, err, fmt.Sprintf("can't find index name = abc on segment = %d, with filed id = %d", segIdxInfo.SegmentID, segIdxInfo.FieldID)) - _, err = mt.GetSegmentIndexInfoByID(segID, 11, idxInfo[0].IndexName) + _, err = mt.GetSegmentIndexInfoByID(segIdxInfo.SegmentID, 11, idxInfo[0].IndexName) assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("can't find index name = %s on segment = %d, with filed id = 11", idxInfo[0].IndexName, segID)) + assert.EqualError(t, err, fmt.Sprintf("can't find index name = %s on segment = %d, with filed id = 11", idxInfo[0].IndexName, segIdxInfo.SegmentID)) }) wg.Add(1) @@ -930,20 +939,22 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - mt.collID2Meta = make(map[int64]model.Collection) - _, err = mt.unlockGetFieldSchema(collInfo.Name, collInfo.Fields[0].Name) + mt.collID2Meta = make(map[int64]pb.CollectionInfo) + _, err = mt.unlockGetFieldSchema(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name) assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Name)) + assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Schema.Name)) mt.collName2ID = make(map[string]int64) - _, err = mt.unlockGetFieldSchema(collInfo.Name, collInfo.Fields[0].Name) + _, err = mt.unlockGetFieldSchema(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name) assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Name)) + assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Schema.Name)) }) wg.Add(1) @@ -962,7 +973,7 @@ func TestMetaTable(t *testing.T) { idxMeta := make(map[int64]pb.SegmentIndexInfo) idxMeta[idx.IndexID] = *idx - field := model.Field{ + field := schemapb.FieldSchema{ FieldID: 31, } assert.False(t, mt.IsSegmentIndexed(idx.SegmentID, &field, nil)) @@ -979,7 +990,7 @@ func TestMetaTable(t *testing.T) { err := mt.reloadFromKV() assert.Nil(t, err) - idx := &model.Index{ + idx := &pb.IndexInfo{ IndexName: "no-idx", IndexID: 456, IndexParams: []*commonpb.KeyValuePair{ @@ -993,7 +1004,6 @@ func TestMetaTable(t *testing.T) { mt.collName2ID["abc"] = 123 _, _, err = mt.GetNotIndexedSegments("abc", "no-field", idx, nil) assert.NotNil(t, err) - assert.EqualError(t, err, "collection abc not found") mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error { return nil @@ -1004,18 +1014,26 @@ func TestMetaTable(t *testing.T) { err = mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = 
[]*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - _, _, err = mt.GetNotIndexedSegments(collInfo.Name, "no-field", idx, nil) + _, _, err = mt.GetNotIndexedSegments(collInfo.Schema.Name, "no-field", idx, nil) assert.NotNil(t, err) - assert.EqualError(t, err, fmt.Sprintf("collection %s doesn't have filed no-field", collInfo.Name)) + assert.EqualError(t, err, fmt.Sprintf("collection %s doesn't have filed no-field", collInfo.Schema.Name)) - mt.indexID2Meta = make(map[int64]model.Index) - _, _, err = mt.GetNotIndexedSegments(collInfo.Name, collInfo.Fields[0].Name, idx, nil) - assert.Nil(t, err) + bakMeta := mt.indexID2Meta + mt.indexID2Meta = make(map[int64]pb.IndexInfo) + mockTxnKV.multiSave = func(kvs map[string]string) error { + return fmt.Errorf("multi save error") + } + assert.Panics(t, func() { + _, _, _ = mt.GetNotIndexedSegments(collInfo.Schema.Name, collInfo.Schema.Fields[0].Name, idxInfo[0], []UniqueID{10001, 10002}) + }) + mt.indexID2Meta = bakMeta }) wg.Add(1) @@ -1041,12 +1059,14 @@ func TestMetaTable(t *testing.T) { err = mt.reloadFromKV() assert.Nil(t, err) - collInfo.Partitions = []*model.Partition{} + collInfo.PartitionIDs = nil + collInfo.PartitionNames = nil + collInfo.PartitionCreatedTimestamps = nil ts := ftso() - err = mt.AddCollection(collInfo, ts, "") + err = mt.AddCollection(collInfo, ts, idxInfo, "") assert.Nil(t, err) - mt.indexID2Meta = make(map[int64]model.Index) - _, _, err = mt.GetIndexByName(collInfo.Name, idxInfo[0].IndexName) + mt.indexID2Meta = make(map[int64]pb.IndexInfo) + _, _, err = mt.GetIndexByName(collInfo.Schema.Name, idxInfo[0].IndexName) assert.NotNil(t, err) assert.EqualError(t, err, fmt.Sprintf("index id = %d not found", idxInfo[0].IndexID)) @@ -1103,28 +1123,35 @@ func TestMetaWithTimestamp(t *testing.T) { assert.Nil(t, err) defer etcdCli.Close() - skv, err := kvmetestore.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) + skv, err := newMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) assert.Nil(t, err) assert.NotNil(t, skv) txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath) - mt, err := NewMetaTable(context.TODO(), txnKV, skv) + mt, err := NewMetaTable(txnKV, skv) assert.Nil(t, err) - collInfo := &model.Collection{ - CollectionID: collID1, - Name: collName1, + collInfo := &pb.CollectionInfo{ + ID: 1, + Schema: &schemapb.CollectionSchema{ + Name: collName1, + }, } - collInfo.Partitions = []*model.Partition{{PartitionID: partID1, PartitionName: partName1, PartitionCreatedTimestamp: ftso()}} + collInfo.PartitionIDs = []int64{partID1} + collInfo.PartitionNames = []string{partName1} + collInfo.PartitionCreatedTimestamps = []uint64{ftso()} t1 := ftso() - err = mt.AddCollection(collInfo, t1, "") + err = mt.AddCollection(collInfo, t1, nil, "") assert.Nil(t, err) - collInfo.CollectionID = collID2 - collInfo.Partitions = []*model.Partition{{PartitionID: partID2, PartitionName: partName2, PartitionCreatedTimestamp: ftso()}} - collInfo.Name = collName2 + collInfo.ID = 2 + collInfo.PartitionIDs = []int64{partID2} + collInfo.PartitionNames = []string{partName2} + collInfo.PartitionCreatedTimestamps = []uint64{ftso()} + collInfo.Schema.Name = collName2 + t2 := ftso() - err = mt.AddCollection(collInfo, t2, "") + err = mt.AddCollection(collInfo, t2, nil, "") assert.Nil(t, err) assert.True(t, mt.HasCollection(collID1, 0)) @@ -1143,21 +1170,21 @@ func 
TestMetaWithTimestamp(t *testing.T) { assert.Nil(t, err) c2, err := mt.GetCollectionByID(collID2, 0) assert.Nil(t, err) - assert.Equal(t, collID1, c1.CollectionID) - assert.Equal(t, collID2, c2.CollectionID) + assert.Equal(t, collID1, c1.ID) + assert.Equal(t, collID2, c2.ID) c1, err = mt.GetCollectionByID(collID1, t2) assert.Nil(t, err) c2, err = mt.GetCollectionByID(collID2, t2) assert.Nil(t, err) - assert.Equal(t, collID1, c1.CollectionID) - assert.Equal(t, collID2, c2.CollectionID) + assert.Equal(t, collID1, c1.ID) + assert.Equal(t, collID2, c2.ID) c1, err = mt.GetCollectionByID(collID1, t1) assert.Nil(t, err) c2, err = mt.GetCollectionByID(collID2, t1) assert.NotNil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) + assert.Equal(t, int64(1), c1.ID) c1, err = mt.GetCollectionByID(collID1, tsoStart) assert.NotNil(t, err) @@ -1168,28 +1195,28 @@ func TestMetaWithTimestamp(t *testing.T) { assert.Nil(t, err) c2, err = mt.GetCollectionByName(collName2, 0) assert.Nil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) - assert.Equal(t, int64(2), c2.CollectionID) + assert.Equal(t, int64(1), c1.ID) + assert.Equal(t, int64(2), c2.ID) c1, err = mt.GetCollectionByName(collName1, t2) assert.Nil(t, err) c2, err = mt.GetCollectionByName(collName2, t2) assert.Nil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) - assert.Equal(t, int64(2), c2.CollectionID) + assert.Equal(t, int64(1), c1.ID) + assert.Equal(t, int64(2), c2.ID) c1, err = mt.GetCollectionByName(collName1, t1) assert.Nil(t, err) c2, err = mt.GetCollectionByName(collName2, t1) assert.NotNil(t, err) - assert.Equal(t, int64(1), c1.CollectionID) + assert.Equal(t, int64(1), c1.ID) c1, err = mt.GetCollectionByName(collName1, tsoStart) assert.NotNil(t, err) c2, err = mt.GetCollectionByName(collName2, tsoStart) assert.NotNil(t, err) - getKeys := func(m map[string]*model.Collection) []string { + getKeys := func(m map[string]*pb.CollectionInfo) []string { keys := make([]string, 0, len(m)) for key := range m { keys = append(keys, key) @@ -1216,16 +1243,17 @@ func TestMetaWithTimestamp(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 0, len(s1)) - p1, err := mt.GetPartitionByName(collID1, partName1, 0) + p1, err := mt.GetPartitionByName(1, partName1, 0) assert.Nil(t, err) - p2, err := mt.GetPartitionByName(collID2, partName2, 0) + p2, err := mt.GetPartitionByName(2, partName2, 0) + assert.Nil(t, err) + assert.Equal(t, int64(11), p1) + assert.Equal(t, int64(12), p2) assert.Nil(t, err) - assert.Equal(t, int64(partID1), p1) - assert.Equal(t, int64(partID2), p2) - p1, err = mt.GetPartitionByName(collID1, partName1, t2) + p1, err = mt.GetPartitionByName(1, partName1, t2) assert.Nil(t, err) - p2, err = mt.GetPartitionByName(collID2, partName2, t2) + p2, err = mt.GetPartitionByName(2, partName2, t2) assert.Nil(t, err) assert.Equal(t, int64(11), p1) assert.Equal(t, int64(12), p2) @@ -1260,72 +1288,65 @@ func TestFixIssue10540(t *testing.T) { assert.Nil(t, err) defer etcdCli.Close() - skv, err := kvmetestore.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) + skv, err := newMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7) assert.Nil(t, err) assert.NotNil(t, skv) //txnKV := etcdkv.NewEtcdKVWithClient(etcdCli, rootPath) txnKV := memkv.NewMemoryKV() // compose rc7 legace tombstone cases - txnKV.Save(path.Join(kvmetestore.SegmentIndexMetaPrefix, "2"), string(kvmetestore.SuffixSnapshotTombstone)) - txnKV.Save(path.Join(kvmetestore.IndexMetaPrefix, "3"), string(kvmetestore.SuffixSnapshotTombstone)) + txnKV.Save(path.Join(ProxyMetaPrefix, "1"), 
string(suffixSnapshotTombstone)) + txnKV.Save(path.Join(SegmentIndexMetaPrefix, "2"), string(suffixSnapshotTombstone)) + txnKV.Save(path.Join(IndexMetaPrefix, "3"), string(suffixSnapshotTombstone)) - _, err = NewMetaTable(context.TODO(), txnKV, skv) + _, err = NewMetaTable(txnKV, skv) assert.Nil(t, err) } func TestMetaTable_GetSegmentIndexInfos(t *testing.T) { meta := &MetaTable{ - segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index{}, + segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo{}, } segID := typeutil.UniqueID(100) _, err := meta.GetSegmentIndexInfos(segID) assert.Error(t, err) - meta.segID2IndexMeta[segID] = map[typeutil.UniqueID]model.Index{ + meta.segID2IndexMeta[segID] = map[typeutil.UniqueID]pb.SegmentIndexInfo{ 5: { CollectionID: 1, + PartitionID: 2, + SegmentID: segID, FieldID: 4, IndexID: 5, - SegmentIndexes: map[int64]model.SegmentIndex{ - segID: { - Segment: model.Segment{ - SegmentID: segID, - PartitionID: 2, - }, - BuildID: 6, - EnableIndex: true, - }, - }, + BuildID: 6, + EnableIndex: true, }, } infos, err := meta.GetSegmentIndexInfos(segID) assert.NoError(t, err) indexInfos, ok := infos[5] assert.True(t, ok) - assert.Equal(t, typeutil.UniqueID(1), indexInfos.CollectionID) - segmentIndex := indexInfos.SegmentIndexes[segID] - segment := segmentIndex.Segment - assert.Equal(t, typeutil.UniqueID(2), segment.PartitionID) - assert.Equal(t, segID, segment.SegmentID) - assert.Equal(t, typeutil.UniqueID(4), indexInfos.FieldID) - assert.Equal(t, typeutil.UniqueID(5), indexInfos.IndexID) - assert.Equal(t, typeutil.UniqueID(6), segmentIndex.BuildID) - assert.Equal(t, true, segmentIndex.EnableIndex) + assert.Equal(t, typeutil.UniqueID(1), indexInfos.GetCollectionID()) + assert.Equal(t, typeutil.UniqueID(2), indexInfos.GetPartitionID()) + assert.Equal(t, segID, indexInfos.GetSegmentID()) + assert.Equal(t, typeutil.UniqueID(4), indexInfos.GetFieldID()) + assert.Equal(t, typeutil.UniqueID(5), indexInfos.GetIndexID()) + assert.Equal(t, typeutil.UniqueID(6), indexInfos.GetBuildID()) + assert.Equal(t, true, indexInfos.GetEnableIndex()) } func TestMetaTable_unlockGetCollectionInfo(t *testing.T) { t.Run("normal case", func(t *testing.T) { mt := &MetaTable{ collName2ID: map[string]typeutil.UniqueID{"test": 100}, - collID2Meta: map[typeutil.UniqueID]model.Collection{ - 100: {CollectionID: 100, Name: "test"}, + collID2Meta: map[typeutil.UniqueID]pb.CollectionInfo{ + 100: {ID: 100, Schema: &schemapb.CollectionSchema{Name: "test"}}, }, } info, err := mt.unlockGetCollectionInfo("test") assert.NoError(t, err) - assert.Equal(t, UniqueID(100), info.CollectionID) - assert.Equal(t, "test", info.Name) + assert.Equal(t, UniqueID(100), info.ID) + assert.Equal(t, "test", info.GetSchema().GetName()) }) t.Run("collection name not found", func(t *testing.T) { @@ -1358,29 +1379,29 @@ func TestMetaTable_unlockGetCollectionInfo(t *testing.T) { func TestMetaTable_checkFieldCanBeIndexed(t *testing.T) { t.Run("field not indexed", func(t *testing.T) { mt := &MetaTable{} - collMeta := model.Collection{ - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{ + fieldSchema := schemapb.FieldSchema{ FieldID: 101, } - idxInfo := &model.Index{} + idxInfo := &pb.IndexInfo{} err := mt.checkFieldCanBeIndexed(collMeta, fieldSchema, idxInfo) assert.NoError(t, err) }) t.Run("field already indexed", func(t *testing.T) { mt := &MetaTable{ - 
indexID2Meta: map[typeutil.UniqueID]model.Index{ + indexID2Meta: map[typeutil.UniqueID]pb.IndexInfo{ 200: {IndexID: 200, IndexName: "test"}, }, } - collMeta := model.Collection{ - Name: "test", - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + Schema: &schemapb.CollectionSchema{Name: "test"}, + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{Name: "test", FieldID: 100} - idxInfo := &model.Index{IndexName: "not_test"} + fieldSchema := schemapb.FieldSchema{Name: "test", FieldID: 100} + idxInfo := &pb.IndexInfo{IndexName: "not_test"} err := mt.checkFieldCanBeIndexed(collMeta, fieldSchema, idxInfo) assert.Error(t, err) }) @@ -1388,15 +1409,15 @@ func TestMetaTable_checkFieldCanBeIndexed(t *testing.T) { t.Run("unexpected", func(t *testing.T) { mt := &MetaTable{ // index meta incomplete. - indexID2Meta: map[typeutil.UniqueID]model.Index{}, + indexID2Meta: map[typeutil.UniqueID]pb.IndexInfo{}, } - collMeta := model.Collection{ - Name: "test", - CollectionID: 1000, - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + Schema: &schemapb.CollectionSchema{Name: "test"}, + ID: 1000, + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{Name: "test", FieldID: 100} - idxInfo := &model.Index{IndexName: "not_test"} + fieldSchema := schemapb.FieldSchema{Name: "test", FieldID: 100} + idxInfo := &pb.IndexInfo{IndexName: "not_test"} err := mt.checkFieldCanBeIndexed(collMeta, fieldSchema, idxInfo) assert.NoError(t, err) }) @@ -1405,50 +1426,50 @@ func TestMetaTable_checkFieldCanBeIndexed(t *testing.T) { func TestMetaTable_checkFieldIndexDuplicate(t *testing.T) { t.Run("index already exists", func(t *testing.T) { mt := &MetaTable{ - indexID2Meta: map[typeutil.UniqueID]model.Index{ + indexID2Meta: map[typeutil.UniqueID]pb.IndexInfo{ 200: {IndexID: 200, IndexName: "test"}, }, } - collMeta := model.Collection{ - Name: "test", - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + Schema: &schemapb.CollectionSchema{Name: "test"}, + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{Name: "test", FieldID: 101} - idxInfo := &model.Index{IndexName: "test"} + fieldSchema := schemapb.FieldSchema{Name: "test", FieldID: 101} + idxInfo := &pb.IndexInfo{IndexName: "test"} _, err := mt.checkFieldIndexDuplicate(collMeta, fieldSchema, idxInfo) assert.Error(t, err) }) t.Run("index parameters mismatch", func(t *testing.T) { mt := &MetaTable{ - indexID2Meta: map[typeutil.UniqueID]model.Index{ + indexID2Meta: map[typeutil.UniqueID]pb.IndexInfo{ 200: {IndexID: 200, IndexName: "test", IndexParams: []*commonpb.KeyValuePair{{Key: "Key", Value: "Value"}}}, }, } - collMeta := model.Collection{ - Name: "test", - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + Schema: &schemapb.CollectionSchema{Name: "test"}, + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{Name: "test", FieldID: 100} - idxInfo := &model.Index{IndexName: "test", IndexParams: []*commonpb.KeyValuePair{{Key: "Key", Value: "not_Value"}}} + fieldSchema := schemapb.FieldSchema{Name: "test", FieldID: 100} + idxInfo := &pb.IndexInfo{IndexName: "test", IndexParams: []*commonpb.KeyValuePair{{Key: "Key", Value: "not_Value"}}} _, err := mt.checkFieldIndexDuplicate(collMeta, fieldSchema, idxInfo) assert.Error(t, err) }) 
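
These `checkFieldIndexDuplicate` subtests pin the intended semantics down: the same index name with identical params is a benign duplicate, the same name with different params is an error, and a different name is no duplicate at all. A cut-down sketch under assumed stand-in types (the real ones are the `pb` messages used above):

```go
package main

import "fmt"

type KeyValuePair struct{ Key, Value string }

type IndexInfo struct {
	IndexName   string
	IndexParams []KeyValuePair
}

// equalKeyPairArray treats the params as an unordered key->value set.
func equalKeyPairArray(a, b []KeyValuePair) bool {
	if len(a) != len(b) {
		return false
	}
	m := make(map[string]string, len(a))
	for _, kv := range a {
		m[kv.Key] = kv.Value
	}
	for _, kv := range b {
		if v, ok := m[kv.Key]; !ok || v != kv.Value {
			return false
		}
	}
	return true
}

// checkDuplicate returns (duplicate, err) in the spirit of
// checkFieldIndexDuplicate: name match + param match -> (true, nil);
// name match + param mismatch -> (false, error).
func checkDuplicate(existing, requested *IndexInfo) (bool, error) {
	if existing.IndexName != requested.IndexName {
		return false, nil
	}
	if equalKeyPairArray(existing.IndexParams, requested.IndexParams) {
		return true, nil
	}
	return false, fmt.Errorf("index name = %s already exists with different params", requested.IndexName)
}

func main() {
	old := &IndexInfo{IndexName: "test", IndexParams: []KeyValuePair{{"Key", "Value"}}}
	fmt.Println(checkDuplicate(old, &IndexInfo{IndexName: "test", IndexParams: []KeyValuePair{{"Key", "Value"}}}))     // true <nil>
	fmt.Println(checkDuplicate(old, &IndexInfo{IndexName: "test", IndexParams: []KeyValuePair{{"Key", "not_Value"}}})) // false + error
}
```
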
t.Run("index parameters match", func(t *testing.T) { mt := &MetaTable{ - indexID2Meta: map[typeutil.UniqueID]model.Index{ + indexID2Meta: map[typeutil.UniqueID]pb.IndexInfo{ 200: {IndexID: 200, IndexName: "test", IndexParams: []*commonpb.KeyValuePair{{Key: "Key", Value: "Value"}}}, }, } - collMeta := model.Collection{ - Name: "test", - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + Schema: &schemapb.CollectionSchema{Name: "test"}, + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{Name: "test", FieldID: 100} - idxInfo := &model.Index{IndexName: "test", IndexParams: []*commonpb.KeyValuePair{{Key: "Key", Value: "Value"}}} + fieldSchema := schemapb.FieldSchema{Name: "test", FieldID: 100} + idxInfo := &pb.IndexInfo{IndexName: "test", IndexParams: []*commonpb.KeyValuePair{{Key: "Key", Value: "Value"}}} duplicate, err := mt.checkFieldIndexDuplicate(collMeta, fieldSchema, idxInfo) assert.NoError(t, err) assert.True(t, duplicate) @@ -1456,13 +1477,13 @@ func TestMetaTable_checkFieldIndexDuplicate(t *testing.T) { t.Run("field not found", func(t *testing.T) { mt := &MetaTable{} - collMeta := model.Collection{ - FieldIndexes: []*model.Index{{FieldID: 100, IndexID: 200}}, + collMeta := pb.CollectionInfo{ + FieldIndexes: []*pb.FieldIndexInfo{{FiledID: 100, IndexID: 200}}, } - fieldSchema := model.Field{ + fieldSchema := schemapb.FieldSchema{ FieldID: 101, } - idxInfo := &model.Index{} + idxInfo := &pb.IndexInfo{} duplicate, err := mt.checkFieldIndexDuplicate(collMeta, fieldSchema, idxInfo) assert.NoError(t, err) assert.False(t, duplicate) diff --git a/internal/rootcoord/proxy_manager.go b/internal/rootcoord/proxy_manager.go index a931b0bab6..71f99bb5ac 100644 --- a/internal/rootcoord/proxy_manager.go +++ b/internal/rootcoord/proxy_manager.go @@ -24,7 +24,6 @@ import ( "sync" "github.com/milvus-io/milvus/internal/log" - "github.com/milvus-io/milvus/internal/metastore/kv" "github.com/milvus-io/milvus/internal/metrics" "github.com/milvus-io/milvus/internal/util/sessionutil" "github.com/milvus-io/milvus/internal/util/typeutil" @@ -75,7 +74,7 @@ func (p *proxyManager) DelSessionFunc(fns ...func(*sessionutil.Session)) { // WatchProxy starts a goroutine to watch proxy session changes on etcd func (p *proxyManager) WatchProxy() error { - ctx, cancel := context.WithTimeout(p.ctx, kv.RequestTimeout) + ctx, cancel := context.WithTimeout(p.ctx, RequestTimeout) defer cancel() sessions, rev, err := p.getSessionsOnEtcd(ctx) @@ -198,7 +197,7 @@ func (p *proxyManager) Stop() { // listProxyInEtcd helper function lists proxy in etcd func listProxyInEtcd(ctx context.Context, cli *clientv3.Client) (map[int64]*sessionutil.Session, error) { - ctx2, cancel := context.WithTimeout(ctx, kv.RequestTimeout) + ctx2, cancel := context.WithTimeout(ctx, RequestTimeout) defer cancel() resp, err := cli.Get( ctx2, diff --git a/internal/rootcoord/root_coord.go b/internal/rootcoord/root_coord.go index 824c4e8b39..c8859a45c1 100644 --- a/internal/rootcoord/root_coord.go +++ b/internal/rootcoord/root_coord.go @@ -31,24 +31,26 @@ import ( "time" "github.com/golang/protobuf/proto" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" "github.com/milvus-io/milvus/internal/allocator" "github.com/milvus-io/milvus/internal/common" "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" "github.com/milvus-io/milvus/internal/log" - kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv" - 
"github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/metrics" ms "github.com/milvus-io/milvus/internal/mq/msgstream" "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/milvus-io/milvus/internal/proto/etcdpb" "github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/milvuspb" "github.com/milvus-io/milvus/internal/proto/proxypb" "github.com/milvus-io/milvus/internal/proto/querypb" "github.com/milvus-io/milvus/internal/proto/rootcoordpb" + "github.com/milvus-io/milvus/internal/proto/schemapb" "github.com/milvus-io/milvus/internal/tso" "github.com/milvus-io/milvus/internal/types" "github.com/milvus-io/milvus/internal/util" @@ -62,8 +64,6 @@ import ( "github.com/milvus-io/milvus/internal/util/trace" "github.com/milvus-io/milvus/internal/util/tsoutil" "github.com/milvus-io/milvus/internal/util/typeutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.uber.org/zap" ) // UniqueID is an alias of typeutil.UniqueID. @@ -133,7 +133,7 @@ type Core struct { CallGetRecoveryInfoService func(ctx context.Context, collID, partID UniqueID) ([]*datapb.SegmentBinlogs, error) //call index builder's client to build index, return build id or get index state. - CallBuildIndexService func(ctx context.Context, segID UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (typeutil.UniqueID, error) + CallBuildIndexService func(ctx context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (typeutil.UniqueID, error) CallDropIndexService func(ctx context.Context, indexID typeutil.UniqueID) error CallGetIndexStatesService func(ctx context.Context, IndexBuildIDs []int64) ([]*indexpb.IndexInfo, error) @@ -363,20 +363,20 @@ func (c *Core) checkFlushedSegments(ctx context.Context) { if len(collMeta.FieldIndexes) == 0 { continue } - for _, part := range collMeta.Partitions { + for _, partID := range collMeta.PartitionIDs { ctx2, cancel2 := context.WithTimeout(ctx, 3*time.Minute) - segBinlogs, err := c.CallGetRecoveryInfoService(ctx2, collMeta.CollectionID, part.PartitionID) + segBinlogs, err := c.CallGetRecoveryInfoService(ctx2, collMeta.ID, partID) if err != nil { log.Debug("failed to get flushed segments from dataCoord", - zap.Int64("collection ID", collMeta.CollectionID), - zap.Int64("partition ID", part.PartitionID), + zap.Int64("collection ID", collMeta.GetID()), + zap.Int64("partition ID", partID), zap.Error(err)) cancel2() continue } for _, segBinlog := range segBinlogs { segID := segBinlog.SegmentID - var indexInfos []*model.Index + var indexInfos []*etcdpb.FieldIndexInfo indexMeta, ok := segID2IndexMeta[segID] if !ok { indexInfos = append(indexInfos, collMeta.FieldIndexes...) 
@@ -389,11 +389,11 @@ func (c *Core) checkFlushedSegments(ctx context.Context) { } for _, idxInfo := range indexInfos { /* #nosec G601 */ - field, err := GetFieldSchemaByID(&collMeta, idxInfo.FieldID) + field, err := GetFieldSchemaByID(&collMeta, idxInfo.FiledID) if err != nil { log.Debug("GetFieldSchemaByID", zap.Any("collection_meta", collMeta), - zap.Int64("field id", idxInfo.FieldID)) + zap.Int64("field id", idxInfo.FiledID)) continue } indexMeta, ok := indexID2Meta[idxInfo.IndexID] @@ -401,26 +401,19 @@ func (c *Core) checkFlushedSegments(ctx context.Context) { log.Debug("index meta does not exist", zap.Int64("index_id", idxInfo.IndexID)) continue } - info := model.Index{ - CollectionID: collMeta.CollectionID, - FieldID: idxInfo.FieldID, + info := etcdpb.SegmentIndexInfo{ + CollectionID: collMeta.ID, + PartitionID: partID, + SegmentID: segID, + FieldID: idxInfo.FiledID, IndexID: idxInfo.IndexID, - SegmentIndexes: map[int64]model.SegmentIndex{ - segID: { - Segment: model.Segment{ - SegmentID: segID, - PartitionID: part.PartitionID, - }, - EnableIndex: false, - }, - }, + EnableIndex: false, } log.Debug("building index by background checker", zap.Int64("segment_id", segID), zap.Int64("index_id", indexMeta.IndexID), - zap.Int64("collection_id", collMeta.CollectionID)) - segmentIndex := info.SegmentIndexes[segID] - segmentIndex.BuildID, err = c.BuildIndex(ctx2, segID, segBinlog.GetNumOfRows(), segBinlog.GetFieldBinlogs(), field, &indexMeta, false) + zap.Int64("collection_id", collMeta.ID)) + info.BuildID, err = c.BuildIndex(ctx2, segID, segBinlog.GetNumOfRows(), segBinlog.GetFieldBinlogs(), field, &indexMeta, false) if err != nil { log.Debug("build index failed", zap.Int64("segment_id", segID), @@ -428,14 +421,14 @@ func (c *Core) checkFlushedSegments(ctx context.Context) { zap.Int64("index_id", indexMeta.IndexID)) continue } - if segmentIndex.BuildID != 0 { - segmentIndex.EnableIndex = true + if info.BuildID != 0 { + info.EnableIndex = true } - if err := c.MetaTable.AlterIndex(&info); err != nil { + if err := c.MetaTable.AddIndex(&info); err != nil { log.Debug("Add index into meta table failed", - zap.Int64("collection_id", collMeta.CollectionID), + zap.Int64("collection_id", collMeta.ID), zap.Int64("index_id", info.IndexID), - zap.Int64("build_id", segmentIndex.BuildID), + zap.Int64("build_id", info.BuildID), zap.Error(err)) } } @@ -452,16 +445,16 @@ func (c *Core) getSegments(ctx context.Context, collID typeutil.UniqueID) (map[U } segID2PartID := make(map[UniqueID]UniqueID) segID2Binlog := make(map[UniqueID]*datapb.SegmentBinlogs) - for _, part := range collMeta.Partitions { - if segs, err := c.CallGetRecoveryInfoService(ctx, collID, part.PartitionID); err == nil { + for _, partID := range collMeta.PartitionIDs { + if segs, err := c.CallGetRecoveryInfoService(ctx, collID, partID); err == nil { for _, s := range segs { - segID2PartID[s.SegmentID] = part.PartitionID + segID2PartID[s.SegmentID] = partID segID2Binlog[s.SegmentID] = s } } else { log.Error("failed to get flushed segments info from dataCoord", zap.Int64("collection ID", collID), - zap.Int64("partition ID", part.PartitionID), + zap.Int64("partition ID", partID), zap.Error(err)) return nil, nil, err } @@ -711,7 +704,7 @@ func (c *Core) SetIndexCoord(s types.IndexCoord) error { } }() - c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (retID typeutil.UniqueID, retErr error) { + c.CallBuildIndexService = func(ctx context.Context, segID 
UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (retID typeutil.UniqueID, retErr error) { defer func() { if err := recover(); err != nil { retErr = fmt.Errorf("build index panic, msg = %v", err) @@ -725,7 +718,7 @@ func (c *Core) SetIndexCoord(s types.IndexCoord) error { IndexID: idxInfo.IndexID, IndexName: idxInfo.IndexName, NumRows: numRows, - FieldSchema: model.ConvertToFieldSchemaPB(field), + FieldSchema: field, SegmentID: segID, }) if err != nil { @@ -871,14 +864,14 @@ func (c *Core) SetQueryCoord(s types.QueryCoord) error { } // BuildIndex will check row num and call build index service -func (c *Core) BuildIndex(ctx context.Context, segID UniqueID, numRows int64, binlogs []*datapb.FieldBinlog, field *model.Field, idxInfo *model.Index, isFlush bool) (typeutil.UniqueID, error) { +func (c *Core) BuildIndex(ctx context.Context, segID UniqueID, numRows int64, binlogs []*datapb.FieldBinlog, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, isFlush bool) (typeutil.UniqueID, error) { log.Debug("start build index", zap.String("index name", idxInfo.IndexName), zap.String("field name", field.Name), zap.Int64("segment id", segID)) sp, ctx := trace.StartSpanFromContext(ctx) defer sp.Finish() if c.MetaTable.IsSegmentIndexed(segID, field, idxInfo.IndexParams) { - info, err := c.MetaTable.GetSegmentIndexInfoByID(segID, field.FieldID, idxInfo.IndexName) - return info.SegmentIndexes[segID].BuildID, err + info, err := c.MetaTable.GetSegmentIndexInfoByID(segID, field.FieldID, idxInfo.GetIndexName()) + return info.BuildID, err } var bldID UniqueID var err error @@ -887,7 +880,7 @@ func (c *Core) BuildIndex(ctx context.Context, segID UniqueID, numRows int64, bi } else { binLogs := make([]string, 0) for _, fieldBinLog := range binlogs { - if fieldBinLog.GetFieldID() == field.FieldID { + if fieldBinLog.GetFieldID() == field.GetFieldID() { for _, binLog := range fieldBinLog.GetBinlogs() { binLogs = append(binLogs, binLog.LogPath) } @@ -1021,13 +1014,12 @@ func (c *Core) Init() error { log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError)) return initError } - - var ss *kvmetestore.SuffixSnapshot - if ss, initError = kvmetestore.NewSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil { + var ss *suffixSnapshot + if ss, initError = newSuffixSnapshot(metaKV, "_ts", Params.EtcdCfg.MetaRootPath, "snapshots"); initError != nil { log.Error("RootCoord failed to new suffixSnapshot", zap.Error(initError)) return initError } - if c.MetaTable, initError = NewMetaTable(c.ctx, metaKV, ss); initError != nil { + if c.MetaTable, initError = NewMetaTable(metaKV, ss); initError != nil { log.Error("RootCoord failed to new MetaTable", zap.Any("reason", initError)) return initError } @@ -1205,7 +1197,7 @@ func (c *Core) reSendDdMsg(ctx context.Context, force bool) error { if err != nil { return err } - if _, err = c.MetaTable.GetPartitionByName(collInfo.CollectionID, ddReq.PartitionName, 0); err != nil { + if _, err = c.MetaTable.GetPartitionByName(collInfo.ID, ddReq.PartitionName, 0); err != nil { if err = c.SendDdCreatePartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil { return err } @@ -1226,7 +1218,7 @@ func (c *Core) reSendDdMsg(ctx context.Context, force bool) error { if err != nil { return err } - if _, err = c.MetaTable.GetPartitionByName(collInfo.CollectionID, ddReq.PartitionName, 0); err == nil { + if _, err = c.MetaTable.GetPartitionByName(collInfo.ID, ddReq.PartitionName, 0); err == nil { 
if err = c.SendDdDropPartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil { return err } @@ -2060,14 +2052,14 @@ func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlus if len(coll.FieldIndexes) == 0 { log.Debug("no index params on collection", zap.String("role", typeutil.RootCoordRole), - zap.String("collection_name", coll.Name), zap.Int64("msgID", in.Base.MsgID)) + zap.String("collection_name", coll.Schema.Name), zap.Int64("msgID", in.Base.MsgID)) } for _, f := range coll.FieldIndexes { - fieldSch, err := GetFieldSchemaByID(coll, f.FieldID) + fieldSch, err := GetFieldSchemaByID(coll, f.FiledID) if err != nil { log.Warn("field schema not found", zap.String("role", typeutil.RootCoordRole), - zap.String("collection_name", coll.Name), zap.Int64("field id", f.FieldID), + zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) continue } @@ -2075,41 +2067,33 @@ func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlus idxInfo, err := c.MetaTable.GetIndexByID(f.IndexID) if err != nil { log.Warn("index not found", zap.String("role", typeutil.RootCoordRole), - zap.String("collection_name", coll.Name), zap.Int64("field id", f.FieldID), + zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID), zap.Int64("index id", f.IndexID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) continue } - info := model.Index{ + info := etcdpb.SegmentIndexInfo{ CollectionID: in.Segment.CollectionID, + PartitionID: in.Segment.PartitionID, + SegmentID: segID, FieldID: fieldSch.FieldID, IndexID: idxInfo.IndexID, - SegmentIndexes: map[int64]model.SegmentIndex{ - segID: { - Segment: model.Segment{ - SegmentID: segID, - PartitionID: in.Segment.PartitionID, - }, - EnableIndex: false, - }, - }, + EnableIndex: false, } - - segmentIndex := info.SegmentIndexes[segID] - segmentIndex.BuildID, err = c.BuildIndex(ctx, segID, in.Segment.GetNumOfRows(), in.Segment.GetBinlogs(), fieldSch, idxInfo, true) - if err == nil && segmentIndex.BuildID != 0 { - segmentIndex.EnableIndex = true + info.BuildID, err = c.BuildIndex(ctx, segID, in.Segment.GetNumOfRows(), in.Segment.GetBinlogs(), fieldSch, idxInfo, true) + if err == nil && info.BuildID != 0 { + info.EnableIndex = true } else { log.Error("BuildIndex failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection_name", coll.Name), zap.Int64("field id", f.FieldID), - zap.Int64("index id", f.IndexID), zap.Int64("build id", segmentIndex.BuildID), + zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID), + zap.Int64("index id", f.IndexID), zap.Int64("build id", info.BuildID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) continue } - err = c.MetaTable.AlterIndex(&info) + err = c.MetaTable.AddIndex(&info) if err != nil { - log.Error("AlterIndex failed", zap.String("role", typeutil.RootCoordRole), - zap.String("collection_name", coll.Name), zap.Int64("field id", f.FieldID), + log.Error("AddIndex failed", zap.String("role", typeutil.RootCoordRole), + zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID), zap.Int64("index id", f.IndexID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err)) continue } @@ -2382,7 +2366,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) ( // Look up collection name on collection ID. 
var colName string - var colMeta *model.Collection + var colMeta *etcdpb.CollectionInfo if colMeta, err = c.MetaTable.GetCollectionByID(ti.GetCollectionId(), 0); err != nil { log.Error("failed to get collection name", zap.Int64("collection ID", ti.GetCollectionId()), @@ -2392,7 +2376,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) ( Reason: "failed to get collection name for collection ID" + strconv.FormatInt(ti.GetCollectionId(), 10), }, nil } - colName = colMeta.Name + colName = colMeta.GetSchema().GetName() // When DataNode has done its thing, remove it from the busy node list. And send import task again resendTaskFunc() @@ -2506,7 +2490,7 @@ func (c *Core) postImportPersistLoop(ctx context.Context, taskID int64, colID in if colMeta, err := c.MetaTable.GetCollectionByID(colID, 0); err != nil { log.Error("failed to find meta for collection", zap.Int64("collection ID", colID)) - } else if len(colMeta.FieldIndexes) != 0 { + } else if len(colMeta.GetFieldIndexes()) != 0 { c.wg.Add(1) c.checkCompleteIndexLoop(ctx, taskID, colID, colName, segIDs) } diff --git a/internal/rootcoord/root_coord_test.go b/internal/rootcoord/root_coord_test.go index 0120c150be..0600826e17 100644 --- a/internal/rootcoord/root_coord_test.go +++ b/internal/rootcoord/root_coord_test.go @@ -28,16 +28,19 @@ import ( "time" "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" + "github.com/milvus-io/milvus/internal/common" "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" memkv "github.com/milvus-io/milvus/internal/kv/mem" "github.com/milvus-io/milvus/internal/log" - kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv" - "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/mq/msgstream" "github.com/milvus-io/milvus/internal/proto/commonpb" "github.com/milvus-io/milvus/internal/proto/datapb" + "github.com/milvus-io/milvus/internal/proto/etcdpb" "github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/milvuspb" @@ -54,9 +57,6 @@ import ( "github.com/milvus-io/milvus/internal/util/retry" "github.com/milvus-io/milvus/internal/util/sessionutil" "github.com/milvus-io/milvus/internal/util/typeutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - clientv3 "go.etcd.io/etcd/client/v3" ) const ( @@ -450,7 +450,7 @@ func getNotTtMsg(ctx context.Context, n int, ch <-chan *msgstream.MsgPack) []msg } } -func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32, modifyFunc func(collection *model.Collection)) error { +func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32, modifyFunc func(*etcdpb.CollectionInfo)) error { schema := schemapb.CollectionSchema{ Name: collName, } @@ -511,29 +511,24 @@ func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32 chanNames[i] = funcutil.ToPhysicalChannel(vchanNames[i]) } - collInfo := model.Collection{ - CollectionID: collID, - Name: schema.Name, - Description: schema.Description, - AutoID: schema.AutoID, - Fields: model.BatchConvertFieldPBToModel(schema.Fields), - FieldIndexes: make([]*model.Index, 0, 16), - VirtualChannelNames: vchanNames, - PhysicalChannelNames: chanNames, - ShardsNum: 0, // intend to set zero - Partitions: []*model.Partition{ - { - PartitionID: partID, 
- PartitionName: Params.CommonCfg.DefaultPartitionName, - PartitionCreatedTimestamp: 0, - }, - }, + collInfo := etcdpb.CollectionInfo{ + ID: collID, + Schema: &schema, + PartitionIDs: []typeutil.UniqueID{partID}, + PartitionNames: []string{Params.CommonCfg.DefaultPartitionName}, + FieldIndexes: make([]*etcdpb.FieldIndexInfo, 0, 16), + VirtualChannelNames: vchanNames, + PhysicalChannelNames: chanNames, + ShardsNum: 0, // intend to set zero + PartitionCreatedTimestamps: []uint64{0}, } if modifyFunc != nil { modifyFunc(&collInfo) } + idxInfo := make([]*etcdpb.IndexInfo, 0, 16) + // schema is modified (add RowIDField and TimestampField), // so need Marshal again schemaBytes, err := proto.Marshal(&schema) @@ -578,7 +573,7 @@ func createCollectionInMeta(dbName, collName string, core *Core, shardsNum int32 // clear ddl timetick in all conditions defer core.chanTimeTick.removeDdlTimeTick(ts, reason) - err = core.MetaTable.AddCollection(&collInfo, ts, ddOpStr) + err = core.MetaTable.AddCollection(&collInfo, ts, idxInfo, ddOpStr) if err != nil { return fmt.Errorf("meta table add collection failed,error = %w", err) } @@ -755,21 +750,22 @@ func TestRootCoordInitData(t *testing.T) { err = core.MetaTable.DeleteCredential(util.UserRoot) assert.NoError(t, err) - snapshotKV, err := kvmetestore.NewMetaSnapshot(etcdCli, Params.EtcdCfg.MetaRootPath, TimestampPrefix, 7) + snapshotKV, err := newMetaSnapshot(etcdCli, Params.EtcdCfg.MetaRootPath, TimestampPrefix, 7) assert.NotNil(t, snapshotKV) assert.NoError(t, err) txnKV := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath) - mt, err := NewMetaTable(context.TODO(), txnKV, snapshotKV) + mt, err := NewMetaTable(txnKV, snapshotKV) assert.NoError(t, err) mockTxnKV := &mockTestTxnKV{ - TxnKV: mt.txn, - save: func(key, value string) error { - return fmt.Errorf("save error") - }, + TxnKV: mt.txn, + save: func(key, value string) error { return txnKV.Save(key, value) }, remove: func(key string) error { return txnKV.Remove(key) }, } - //mt.txn = mockTxnKV - mt.catalog = &kvmetestore.Catalog{Txn: mockTxnKV, Snapshot: snapshotKV} + mt.txn = mockTxnKV + // mock save data error + mockTxnKV.save = func(key, value string) error { + return fmt.Errorf("save error") + } core.MetaTable = mt err = core.initData() assert.Error(t, err) @@ -935,19 +931,20 @@ func TestRootCoord_Base(t *testing.T) { dmlStream.Start() pChanMap := core.MetaTable.ListCollectionPhysicalChannels() - assert.Greater(t, len(pChanMap[createMeta.CollectionID]), 0) + assert.Greater(t, len(pChanMap[createMeta.ID]), 0) vChanMap := core.MetaTable.ListCollectionVirtualChannels() - assert.Greater(t, len(vChanMap[createMeta.CollectionID]), 0) + assert.Greater(t, len(vChanMap[createMeta.ID]), 0) // get CreateCollectionMsg msgs := getNotTtMsg(ctx, 1, dmlStream.Chan()) assert.Equal(t, 1, len(msgs)) createMsg, ok := (msgs[0]).(*msgstream.CreateCollectionMsg) assert.True(t, ok) - assert.Equal(t, createMeta.CollectionID, createMsg.CollectionID) - assert.Equal(t, 1, len(createMeta.Partitions)) - assert.Equal(t, createMeta.Partitions[0].PartitionID, createMsg.PartitionID) - assert.Equal(t, createMeta.Partitions[0].PartitionName, createMsg.PartitionName) + assert.Equal(t, createMeta.ID, createMsg.CollectionID) + assert.Equal(t, 1, len(createMeta.PartitionIDs)) + assert.Equal(t, createMeta.PartitionIDs[0], createMsg.PartitionID) + assert.Equal(t, 1, len(createMeta.PartitionNames)) + assert.Equal(t, createMeta.PartitionNames[0], createMsg.PartitionName) assert.Equal(t, shardsNum, int32(len(createMeta.VirtualChannelNames))) 
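
`createCollectionInMeta` and the assertions above center on channel bookkeeping: a collection is given `shardsNum` virtual channels, each reduced to a physical channel via `funcutil.ToPhysicalChannel`. The trimming rule in this sketch (keep everything before the last underscore) is an assumption made only so the example runs; treat the real function as authoritative:

```go
package main

import (
	"fmt"
	"strings"
)

// toPhysicalChannel is a guessed approximation of funcutil.ToPhysicalChannel:
// a virtual channel name carries a per-shard suffix after its last underscore.
func toPhysicalChannel(vchan string) string {
	if i := strings.LastIndex(vchan, "_"); i >= 0 {
		return vchan[:i]
	}
	return vchan
}

func main() {
	shardsNum := 2
	vchans := make([]string, shardsNum)
	pchans := make([]string, shardsNum)
	for i := range vchans {
		vchans[i] = fmt.Sprintf("by-dev-rootcoord-dml_%d_v%d", i, i) // invented names
		pchans[i] = toPhysicalChannel(vchans[i])
	}
	// One virtual and one physical channel per shard, which is what the
	// len(...) == shardsNum assertions above check.
	fmt.Println(vchans)
	fmt.Println(pchans)
}
```
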
 		assert.Equal(t, shardsNum, int32(len(createMeta.PhysicalChannelNames)))
 		assert.Equal(t, shardsNum, createMeta.ShardsNum)
@@ -987,8 +984,8 @@ func TestRootCoord_Base(t *testing.T) {
 		var ddCollReq = internalpb.CreateCollectionRequest{}
 		err = proto.Unmarshal(ddOp.Body, &ddCollReq)
 		assert.NoError(t, err)
-		assert.Equal(t, createMeta.CollectionID, ddCollReq.CollectionID)
-		assert.Equal(t, createMeta.Partitions[0].PartitionID, ddCollReq.PartitionID)
+		assert.Equal(t, createMeta.ID, ddCollReq.CollectionID)
+		assert.Equal(t, createMeta.PartitionIDs[0], ddCollReq.PartitionID)
 
 		// check invalid operation
 		req.Base.MsgID = 101
@@ -1090,7 +1087,7 @@ func TestRootCoord_Base(t *testing.T) {
 		assert.NoError(t, err)
 		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
 		assert.Equal(t, collName, rsp.Schema.Name)
-		assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
+		assert.Equal(t, collMeta.ID, rsp.CollectionID)
 		assert.Equal(t, shardsNum, int32(len(rsp.VirtualChannelNames)))
 		assert.Equal(t, shardsNum, int32(len(rsp.PhysicalChannelNames)))
 		assert.Equal(t, shardsNum, rsp.ShardsNum)
@@ -1136,8 +1133,8 @@ func TestRootCoord_Base(t *testing.T) {
 		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
 		collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
 		assert.NoError(t, err)
-		assert.Equal(t, 2, len(collMeta.Partitions))
-		partNameIdx1, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0)
+		assert.Equal(t, 2, len(collMeta.PartitionIDs))
+		partNameIdx1, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[1], 0)
 		assert.NoError(t, err)
 		assert.Equal(t, partName, partNameIdx1)
 
@@ -1145,11 +1142,11 @@ func TestRootCoord_Base(t *testing.T) {
 		assert.Equal(t, 1, len(msgs))
 		partMsg, ok := (msgs[0]).(*msgstream.CreatePartitionMsg)
 		assert.True(t, ok)
-		assert.Equal(t, collMeta.CollectionID, partMsg.CollectionID)
-		assert.Equal(t, collMeta.Partitions[1].PartitionID, partMsg.PartitionID)
+		assert.Equal(t, collMeta.ID, partMsg.CollectionID)
+		assert.Equal(t, collMeta.PartitionIDs[1], partMsg.PartitionID)
 
 		assert.Equal(t, 1, len(pnm.GetCollIDs()))
-		assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[0])
+		assert.Equal(t, collMeta.ID, pnm.GetCollIDs()[0])
 
 		// check DD operation info
 		flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
@@ -1165,8 +1162,8 @@ func TestRootCoord_Base(t *testing.T) {
 		var ddReq = internalpb.CreatePartitionRequest{}
 		err = proto.Unmarshal(ddOp.Body, &ddReq)
 		assert.NoError(t, err)
-		assert.Equal(t, collMeta.CollectionID, ddReq.CollectionID)
-		assert.Equal(t, collMeta.Partitions[1].PartitionID, ddReq.PartitionID)
+		assert.Equal(t, collMeta.ID, ddReq.CollectionID)
+		assert.Equal(t, collMeta.PartitionIDs[1], ddReq.PartitionID)
 
 		err = core.reSendDdMsg(core.ctx, true)
 		assert.NoError(t, err)
@@ -1206,7 +1203,7 @@ func TestRootCoord_Base(t *testing.T) {
 			},
 			DbName:         dbName,
 			CollectionName: collName,
-			CollectionID:   coll.CollectionID,
+			CollectionID:   coll.ID,
 		}
 		rsp, err := core.ShowPartitions(ctx, req)
 		assert.NoError(t, err)
@@ -1220,7 +1217,7 @@ func TestRootCoord_Base(t *testing.T) {
 		defer wg.Done()
 		coll, err := core.MetaTable.GetCollectionByName(collName, 0)
 		assert.NoError(t, err)
-		partID := coll.Partitions[1].PartitionID
+		partID := coll.PartitionIDs[1]
 		dm.mu.Lock()
 		dm.segs = []typeutil.UniqueID{1000, 1001, 1002, 1003, 1004, 1005}
 		dm.mu.Unlock()
@@ -1232,7 +1229,7 @@ func TestRootCoord_Base(t *testing.T) {
 				Timestamp: 170,
 				SourceID:  170,
 			},
-			CollectionID: coll.CollectionID,
+			CollectionID: coll.GetID(),
 			PartitionID:  partID,
 		}
 		rsp, err := core.ShowSegments(ctx, req)
@@ -1310,7 +1307,7 @@ func TestRootCoord_Base(t *testing.T) {
 				Timestamp: 190,
 				SourceID:  190,
 			},
-			CollectionID: coll.CollectionID,
+			CollectionID: coll.ID,
 			SegmentID:    1000,
 		}
 		rsp, err := core.DescribeSegment(ctx, req)
@@ -1370,20 +1367,20 @@ func TestRootCoord_Base(t *testing.T) {
 		assert.NoError(t, err)
 		// Normal case.
 		count, err := core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, ""),
-			collName, coll.CollectionID, []UniqueID{1000, 1001, 1002})
+			collName, coll.ID, []UniqueID{1000, 1001, 1002})
 		assert.NoError(t, err)
 		assert.Equal(t, 3, count)
 		// Case with an empty result.
-		count, err = core.CountCompleteIndex(ctx, collName, coll.CollectionID, []UniqueID{})
+		count, err = core.CountCompleteIndex(ctx, collName, coll.ID, []UniqueID{})
 		assert.NoError(t, err)
 		assert.Equal(t, 0, count)
 		// Case where GetIndexStates failed with error.
 		_, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, returnError),
-			collName, coll.CollectionID, []UniqueID{1000, 1001, 1002})
+			collName, coll.ID, []UniqueID{1000, 1001, 1002})
 		assert.Error(t, err)
 		// Case where GetIndexStates failed with bad status.
 		_, err = core.CountCompleteIndex(context.WithValue(ctx, ctxKey{}, returnUnsuccessfulStatus),
-			collName, coll.CollectionID, []UniqueID{1000, 1001, 1002})
+			collName, coll.ID, []UniqueID{1000, 1001, 1002})
 		assert.Error(t, err)
 	})
 
@@ -1392,7 +1389,7 @@ func TestRootCoord_Base(t *testing.T) {
 		defer wg.Done()
 		coll, err := core.MetaTable.GetCollectionByName(collName, 0)
 		assert.NoError(t, err)
-		partID := coll.Partitions[1].PartitionID
+		partID := coll.PartitionIDs[1]
 
 		flushMsg := datapb.SegmentFlushCompletedMsg{
 			Base: &commonpb.MsgBase{
@@ -1400,7 +1397,7 @@ func TestRootCoord_Base(t *testing.T) {
 			},
 			Segment: &datapb.SegmentInfo{
 				ID:           segID,
-				CollectionID: coll.CollectionID,
+				CollectionID: coll.ID,
 				PartitionID:  partID,
 			},
 		}
@@ -1443,7 +1440,7 @@ func TestRootCoord_Base(t *testing.T) {
 		}
 		coll, err := core.MetaTable.GetCollectionByName(collName, 0)
 		assert.NoError(t, err)
-		core.MetaTable.collName2ID[collName] = coll.CollectionID
+		core.MetaTable.collName2ID[collName] = coll.GetID()
 		rsp, err := core.Import(ctx, req)
 		assert.NoError(t, err)
 		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
@@ -1499,7 +1496,7 @@ func TestRootCoord_Base(t *testing.T) {
 			Base: &commonpb.MsgBase{
 				MsgType: commonpb.MsgType_DescribeSegment,
 			},
-			CollectionID: coll.CollectionID,
+			CollectionID: coll.ID,
 			SegmentID:    segmentID,
 		}
 		segDesc, err := core.DescribeSegment(ctx, describeSegmentRequest)
@@ -1531,15 +1528,10 @@ func TestRootCoord_Base(t *testing.T) {
 			return tID, 0, nil
 		}
 		core.MetaTable.collName2ID["new"+collName] = 123
-		core.MetaTable.collID2Meta[123] = model.Collection{
-			CollectionID: 123,
-			Partitions: []*model.Partition{
-				{
-					PartitionID:   456,
-					PartitionName: "testPartition",
-				},
-			},
-		}
+		core.MetaTable.collID2Meta[123] = etcdpb.CollectionInfo{
+			ID:             123,
+			PartitionIDs:   []int64{456},
+			PartitionNames: []string{"testPartition"}}
 		req := &milvuspb.ImportRequest{
 			CollectionName: "new" + collName,
 			PartitionName:  partName,
@@ -1722,14 +1714,14 @@ func TestRootCoord_Base(t *testing.T) {
 		}
 		collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
 		assert.NoError(t, err)
-		dropPartID := collMeta.Partitions[1].PartitionID
+		dropPartID := collMeta.PartitionIDs[1]
 		status, err := core.DropPartition(ctx, req)
 		assert.NoError(t, err)
 		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
 		collMeta, err = core.MetaTable.GetCollectionByName(collName, 0)
 		assert.NoError(t, err)
-		assert.Equal(t, 1, len(collMeta.Partitions))
-		partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0)
+		assert.Equal(t, 1, len(collMeta.PartitionIDs))
+		partName, err := core.MetaTable.GetPartitionNameByID(collMeta.ID, collMeta.PartitionIDs[0], 0)
 		assert.NoError(t, err)
 		assert.Equal(t, Params.CommonCfg.DefaultPartitionName, partName)
 
@@ -1737,11 +1729,11 @@ func TestRootCoord_Base(t *testing.T) {
 		assert.Equal(t, 1, len(msgs))
 		dmsg, ok := (msgs[0]).(*msgstream.DropPartitionMsg)
 		assert.True(t, ok)
-		assert.Equal(t, collMeta.CollectionID, dmsg.CollectionID)
+		assert.Equal(t, collMeta.ID, dmsg.CollectionID)
 		assert.Equal(t, dropPartID, dmsg.PartitionID)
 
 		assert.Equal(t, 2, len(pnm.GetCollIDs()))
-		assert.Equal(t, collMeta.CollectionID, pnm.GetCollIDs()[1])
+		assert.Equal(t, collMeta.ID, pnm.GetCollIDs()[1])
 
 		// check DD operation info
 		flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
@@ -1757,7 +1749,7 @@ func TestRootCoord_Base(t *testing.T) {
 		var ddReq = internalpb.DropPartitionRequest{}
 		err = proto.Unmarshal(ddOp.Body, &ddReq)
 		assert.NoError(t, err)
-		assert.Equal(t, collMeta.CollectionID, ddReq.CollectionID)
+		assert.Equal(t, collMeta.ID, ddReq.CollectionID)
 		assert.Equal(t, dropPartID, ddReq.PartitionID)
 
 		err = core.reSendDdMsg(core.ctx, true)
@@ -1775,7 +1767,7 @@ func TestRootCoord_Base(t *testing.T) {
 				MsgType:  commonpb.MsgType_RemoveQueryChannels,
 				SourceID: core.session.ServerID,
 			},
-			CollectionID: collMeta.CollectionID,
+			CollectionID: collMeta.ID,
 		}
 		status, err := core.ReleaseDQLMessageStream(core.ctx, req)
 		assert.NoError(t, err)
@@ -1808,15 +1800,15 @@ func TestRootCoord_Base(t *testing.T) {
 		assert.Equal(t, 1, len(msgs))
 		dmsg, ok := (msgs[0]).(*msgstream.DropCollectionMsg)
 		assert.True(t, ok)
-		assert.Equal(t, collMeta.CollectionID, dmsg.CollectionID)
+		assert.Equal(t, collMeta.ID, dmsg.CollectionID)
 		collIDs := pnm.GetCollIDs()
 		assert.Equal(t, 3, len(collIDs))
-		assert.Equal(t, collMeta.CollectionID, collIDs[2])
+		assert.Equal(t, collMeta.ID, collIDs[2])
 
 		time.Sleep(100 * time.Millisecond)
 		qm.mutex.Lock()
 		assert.Equal(t, 1, len(qm.collID))
-		assert.Equal(t, collMeta.CollectionID, qm.collID[0])
+		assert.Equal(t, collMeta.ID, qm.collID[0])
 		qm.mutex.Unlock()
 
 		req = &milvuspb.DropCollectionRequest{
@@ -1835,7 +1827,7 @@ func TestRootCoord_Base(t *testing.T) {
 		time.Sleep(100 * time.Millisecond)
 		collIDs = pnm.GetCollIDs()
 		assert.Equal(t, 3, len(collIDs))
-		assert.Equal(t, collMeta.CollectionID, collIDs[2])
+		assert.Equal(t, collMeta.ID, collIDs[2])
 
 		// check DD operation info
 		flag, err := core.MetaTable.txn.Load(DDMsgSendPrefix)
@@ -1851,7 +1843,7 @@ func TestRootCoord_Base(t *testing.T) {
 		var ddReq = internalpb.DropCollectionRequest{}
 		err = proto.Unmarshal(ddOp.Body, &ddReq)
 		assert.NoError(t, err)
-		assert.Equal(t, collMeta.CollectionID, ddReq.CollectionID)
+		assert.Equal(t, collMeta.ID, ddReq.CollectionID)
 
 		err = core.reSendDdMsg(core.ctx, true)
 		assert.NoError(t, err)
@@ -2239,7 +2231,7 @@ func TestRootCoord_Base(t *testing.T) {
 		p2 := sessionutil.Session{
 			ServerID: 101,
 		}
-		ctx2, cancel2 := context.WithTimeout(ctx, kvmetestore.RequestTimeout)
+		ctx2, cancel2 := context.WithTimeout(ctx, RequestTimeout)
 		defer cancel2()
 		s1, err := json.Marshal(&p1)
 		assert.NoError(t, err)
@@ -2904,7 +2896,7 @@ func TestRootCoord2(t *testing.T) {
 		assert.NoError(t, err)
 		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
 		assert.Equal(t, collName, rsp.Schema.Name)
-		assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
+		assert.Equal(t, collMeta.ID, rsp.CollectionID)
 		assert.Equal(t, common.DefaultShardsNum, int32(len(rsp.VirtualChannelNames)))
 		assert.Equal(t, common.DefaultShardsNum, int32(len(rsp.PhysicalChannelNames)))
 		assert.Equal(t, common.DefaultShardsNum, rsp.ShardsNum)
@@ -3002,7 +2994,7 @@ func TestCheckInit(t *testing.T) {
 	err = c.checkInit()
 	assert.Error(t, err)
 
-	c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *model.Field, idxInfo *model.Index, numRows int64) (typeutil.UniqueID, error) {
+	c.CallBuildIndexService = func(ctx context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, numRows int64) (typeutil.UniqueID, error) {
 		return 0, nil
 	}
 	err = c.checkInit()
@@ -3126,27 +3118,25 @@ func TestCheckFlushedSegments(t *testing.T) {
 		var segID int64 = 1001
 		var fieldID int64 = 101
 		var indexID int64 = 6001
-		core.MetaTable.segID2IndexMeta[segID] = make(map[int64]model.Index)
+		core.MetaTable.segID2IndexMeta[segID] = make(map[int64]etcdpb.SegmentIndexInfo)
 		core.MetaTable.partID2SegID[partID] = make(map[int64]bool)
-		core.MetaTable.collID2Meta[collID] = model.Collection{CollectionID: collID}
+		core.MetaTable.collID2Meta[collID] = etcdpb.CollectionInfo{ID: collID}
 		// do nothing, since collection has 0 index
 		core.checkFlushedSegments(ctx)
 
 		// get field schema by id fail
-		core.MetaTable.collID2Meta[collID] = model.Collection{
-			CollectionID: collID,
-			Partitions: []*model.Partition{
+		core.MetaTable.collID2Meta[collID] = etcdpb.CollectionInfo{
+			ID:           collID,
+			PartitionIDs: []int64{partID},
+			FieldIndexes: []*etcdpb.FieldIndexInfo{
 				{
-					PartitionID: partID,
-				},
-			},
-			FieldIndexes: []*model.Index{
-				{
-					FieldID: fieldID,
+					FiledID: fieldID,
 					IndexID: indexID,
 				},
 			},
-			Fields: []*model.Field{},
+			Schema: &schemapb.CollectionSchema{
+				Fields: []*schemapb.FieldSchema{},
+			},
 		}
 		core.checkFlushedSegments(ctx)
@@ -3162,26 +3152,23 @@ func TestCheckFlushedSegments(t *testing.T) {
 		core.checkFlushedSegments(core.ctx)
 		// missing index info
-		core.MetaTable.collID2Meta[collID] = model.Collection{
-			CollectionID: collID,
-			Fields: []*model.Field{
+		core.MetaTable.collID2Meta[collID] = etcdpb.CollectionInfo{
+			ID:           collID,
+			PartitionIDs: []int64{partID},
+			FieldIndexes: []*etcdpb.FieldIndexInfo{
 				{
-					FieldID: fieldID,
-				},
-			},
-			FieldIndexes: []*model.Index{
-				{
-					FieldID: fieldID,
+					FiledID: fieldID,
 					IndexID: indexID,
 				},
 			},
-			Partitions: []*model.Partition{
-				{
-					PartitionID: partID,
+			Schema: &schemapb.CollectionSchema{
+				Fields: []*schemapb.FieldSchema{
+					{
+						FieldID: fieldID,
+					},
 				},
 			},
 		}
-
 		core.checkFlushedSegments(ctx)
 		// existing segID, buildIndex failed
 		core.CallGetFlushedSegmentsService = func(_ context.Context, cid, pid int64) ([]int64, error) {
@@ -3189,10 +3176,10 @@ func TestCheckFlushedSegments(t *testing.T) {
 			assert.Equal(t, partID, pid)
 			return []int64{segID}, nil
 		}
-		core.MetaTable.indexID2Meta[indexID] = model.Index{
+		core.MetaTable.indexID2Meta[indexID] = etcdpb.IndexInfo{
 			IndexID: indexID,
 		}
-		core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *model.Field, idx *model.Index, numRows int64) (int64, error) {
+		core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idx *etcdpb.IndexInfo, numRows int64) (int64, error) {
 			assert.Equal(t, fieldID, field.FieldID)
 			assert.Equal(t, indexID, idx.IndexID)
 			return -1, errors.New("build index build")
@@ -3201,7 +3188,7 @@ func TestCheckFlushedSegments(t *testing.T) {
 		core.checkFlushedSegments(ctx)
 		var indexBuildID int64 = 10001
-		core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *model.Field, idx *model.Index, numRows int64) (int64, error) {
+		core.CallBuildIndexService = func(_ context.Context, segID UniqueID, binlog []string, field *schemapb.FieldSchema, idx *etcdpb.IndexInfo, numRows int64) (int64, error) {
 			return indexBuildID, nil
 		}
 		core.checkFlushedSegments(core.ctx)
@@ -3277,7 +3264,7 @@ func TestRootCoord_CheckZeroShardsNum(t *testing.T) {
 	time.Sleep(100 * time.Millisecond)
 
-	modifyFunc := func(collInfo *model.Collection) {
+	modifyFunc := func(collInfo *etcdpb.CollectionInfo) {
 		collInfo.ShardsNum = 0
 	}
 
@@ -3300,7 +3287,7 @@ func TestRootCoord_CheckZeroShardsNum(t *testing.T) {
 		assert.NoError(t, err)
 		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
 		assert.Equal(t, collName, rsp.Schema.Name)
-		assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
+		assert.Equal(t, collMeta.ID, rsp.CollectionID)
 		assert.Equal(t, shardsNum, int32(len(rsp.VirtualChannelNames)))
 		assert.Equal(t, shardsNum, int32(len(rsp.PhysicalChannelNames)))
 		assert.Equal(t, shardsNum, rsp.ShardsNum)
@@ -3357,25 +3344,20 @@ func TestCore_DescribeSegments(t *testing.T) {
 	// success.
 	c.MetaTable = &MetaTable{
-		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index{
+		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
 			segID: {
 				indexID: {
 					CollectionID: collID,
+					PartitionID:  partID,
+					SegmentID:    segID,
 					FieldID:      fieldID,
 					IndexID:      indexID,
-					SegmentIndexes: map[int64]model.SegmentIndex{
-						segID: {
-							Segment: model.Segment{
-								PartitionID: partID,
-								SegmentID:   segID,
-							},
-							BuildID:     buildID,
-							EnableIndex: true},
-					},
+					BuildID:     buildID,
+					EnableIndex: true,
 				},
 			},
 		},
-		indexID2Meta: map[typeutil.UniqueID]model.Index{
+		indexID2Meta: map[typeutil.UniqueID]etcdpb.IndexInfo{
 			indexID: {
 				IndexName: indexName,
 				IndexID:   indexID,
diff --git a/internal/metastore/kv/suffix_snapshot.go b/internal/rootcoord/suffix_snapshot.go
similarity index 87%
rename from internal/metastore/kv/suffix_snapshot.go
rename to internal/rootcoord/suffix_snapshot.go
index 24d11c1b70..3fcdc1fe5b 100644
--- a/internal/metastore/kv/suffix_snapshot.go
+++ b/internal/rootcoord/suffix_snapshot.go
@@ -14,7 +14,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package kv
+package rootcoord
 
 import (
 	"bytes"
@@ -35,15 +35,15 @@ import (
 )
 
 var (
-	// SuffixSnapshotTombstone special value for tombstone mark
-	SuffixSnapshotTombstone = []byte{0xE2, 0x9B, 0xBC}
+	// suffixSnapshotTombstone special value for tombstone mark
+	suffixSnapshotTombstone = []byte{0xE2, 0x9B, 0xBC}
 )
 
-// SuffixSnapshot implements SnapshotKV
-// this is a simple replacement for MetaSnapshot, which is not available due to etcd compaction
-// SuffixSnapshot record timestamp as prefix of a key under the Snapshot prefix path
-type SuffixSnapshot struct {
-	// internal kv which SuffixSnapshot based on
+// suffixSnapshot implements SnapshotKV
+// this is a simple replacement for metaSnapshot, which is not available due to etcd compaction
+// suffixSnapshot records the timestamp as a suffix of a key under the snapshot prefix path
+type suffixSnapshot struct {
+	// internal kv which suffixSnapshot is based on
 	kv.TxnKV
 	// rw mutex provided range lock
 	sync.RWMutex
@@ -76,10 +76,10 @@ type tsv struct {
 }
 
 // type conversion make sure implementation
-var _ kv.SnapShotKV = (*SuffixSnapshot)(nil)
+var _ kv.SnapShotKV = (*suffixSnapshot)(nil)
 
-// NewSuffixSnapshot creates a NewSuffixSnapshot with provided kv
-func NewSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*SuffixSnapshot, error) {
+// newSuffixSnapshot creates a suffixSnapshot with the provided kv
+func newSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*suffixSnapshot, error) {
 	if txnKV == nil {
 		return nil, retry.Unrecoverable(errors.New("txnKV is nil"))
 	}
@@ -92,7 +92,7 @@ func NewSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*SuffixSnaps
 	tk = path.Join(root, "k")
 	rootLen := len(tk) - 1
 
-	return &SuffixSnapshot{
+	return &suffixSnapshot{
 		TxnKV:     txnKV,
 		lastestTS: make(map[string]typeutil.Timestamp),
 		separator: sep,
@@ -105,31 +105,31 @@ func NewSuffixSnapshot(txnKV kv.TxnKV, sep, root, snapshot string) (*SuffixSnaps
 }
 
 // isTombstone helper function to check whether is tombstone mark
-func (ss *SuffixSnapshot) isTombstone(value string) bool {
-	return bytes.Equal([]byte(value), SuffixSnapshotTombstone)
+func (ss *suffixSnapshot) isTombstone(value string) bool {
+	return bytes.Equal([]byte(value), suffixSnapshotTombstone)
 }
 
 // hideRootPrefix helper function to hide root prefix from key
-func (ss *SuffixSnapshot) hideRootPrefix(value string) string {
+func (ss *suffixSnapshot) hideRootPrefix(value string) string {
 	return value[ss.rootLen:]
 }
 
 // composeSnapshotPrefix builds a prefix for loading snapshots
 // formatted like [snapshotPrefix]/key[sep]
-func (ss *SuffixSnapshot) composeSnapshotPrefix(key string) string {
+func (ss *suffixSnapshot) composeSnapshotPrefix(key string) string {
 	return path.Join(ss.snapshotPrefix, key+ss.separator)
 }
 
 // composeTSKey unified tsKey composing method
 // uses key, ts and separator to form a key
-func (ss *SuffixSnapshot) composeTSKey(key string, ts typeutil.Timestamp) string {
+func (ss *suffixSnapshot) composeTSKey(key string, ts typeutil.Timestamp) string {
 	// [key][sep][ts]
 	return path.Join(ss.snapshotPrefix, fmt.Sprintf("%s%s%d", key, ss.separator, ts))
 }
 
 // isTSKey checks whether a key is in ts-key format
 // if true, also returns parsed ts value
-func (ss *SuffixSnapshot) isTSKey(key string) (typeutil.Timestamp, bool) {
+func (ss *suffixSnapshot) isTSKey(key string) (typeutil.Timestamp, bool) {
 	// not in snapshot path
 	if !strings.HasPrefix(key, ss.snapshotPrefix) {
 		return 0, false
@@ -146,7 +146,7 @@ func (ss *SuffixSnapshot) isTSKey(key string) (typeutil.Timestamp, bool) {
 
 // isTSOfKey checks whether a key is in ts-key format of provided group key
 // if true, also returns parsed ts value
-func (ss *SuffixSnapshot) isTSOfKey(key string, groupKey string) (typeutil.Timestamp, bool) {
+func (ss *suffixSnapshot) isTSOfKey(key string, groupKey string) (typeutil.Timestamp, bool) {
 	// not in snapshot path
 	if !strings.HasPrefix(key, ss.snapshotPrefix) {
 		return 0, false
@@ -167,7 +167,7 @@ func (ss *SuffixSnapshot) isTSOfKey(key string, groupKey string) (typeutil.Times
 
 // checkKeyTS checks whether the provided key's latest ts is before the provided ts
 // lock is needed
-func (ss *SuffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, error) {
+func (ss *suffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, error) {
 	latest, has := ss.lastestTS[key]
 	if !has {
 		err := ss.loadLatestTS(key)
@@ -180,11 +180,11 @@ func (ss *SuffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, e
 }
 
 // loadLatestTS loads the latest ts for the specified key
-func (ss *SuffixSnapshot) loadLatestTS(key string) error {
+func (ss *suffixSnapshot) loadLatestTS(key string) error {
 	prefix := ss.composeSnapshotPrefix(key)
 	keys, _, err := ss.TxnKV.LoadWithPrefix(prefix)
 	if err != nil {
-		log.Warn("SuffixSnapshot txnkv LoadWithPrefix failed", zap.String("key", key),
+		log.Warn("suffixSnapshot txnkv LoadWithPrefix failed", zap.String("key", key),
 			zap.Error(err))
 		return err
 	}
@@ -241,10 +241,10 @@ func binarySearchRecords(records []tsv, ts typeutil.Timestamp) (string, bool) {
 }
 
 // Save stores key-value pairs with timestamp
-// if ts is 0, SuffixSnapshot works as a TxnKV
-// otherwise, SuffixSnapshot will store a ts-key as "key[sep]ts"-value pair in snapshot path
+// if ts is 0, suffixSnapshot works as a TxnKV
+// otherwise, suffixSnapshot will store a ts-key as "key[sep]ts"-value pair in snapshot path
 // and for acceleration store original key-value if ts is the latest
-func (ss *SuffixSnapshot) Save(key string, value string, ts typeutil.Timestamp) error {
+func (ss *suffixSnapshot) Save(key string, value string, ts typeutil.Timestamp) error {
 	// if ts == 0, act like TxnKV
 	// will not update lastestTS since ts is not valid
 	if ts == 0 {
@@ -278,7 +278,7 @@ func (ss *SuffixSnapshot) Save(key string, value string, ts typeutil.Timestamp)
 	return ss.TxnKV.Save(tsKey, value)
 }
 
-func (ss *SuffixSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) {
+func (ss *suffixSnapshot) Load(key string, ts typeutil.Timestamp) (string, error) {
 	// if ts == 0, load latest by definition
 	// and with acceleration logic, just do load key will do
 	if ts == 0 {
@@ -351,7 +351,7 @@ func (ss *SuffixSnapshot) Load(key string, ts typeutil.Timestamp) (string, error
 
 // MultiSave saves multiple kvs
 // if ts == 0, act like TxnKV
 // each key-value will be treated using the same logic as Save
-func (ss *SuffixSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error {
+func (ss *suffixSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp) error {
 	// if ts == 0, act like TxnKV
 	if ts == 0 {
 		return ss.TxnKV.MultiSave(kvs)
@@ -378,7 +378,7 @@ func (ss *SuffixSnapshot) MultiSave(kvs map[string]string, ts typeutil.Timestamp
 
 // generateSaveExecute examines whether each key is after the corresponding latest ts
 // returns calculated execute map and update ts list
-func (ss *SuffixSnapshot) generateSaveExecute(kvs map[string]string, ts typeutil.Timestamp) (map[string]string, []string, error) {
+func (ss *suffixSnapshot) generateSaveExecute(kvs map[string]string, ts typeutil.Timestamp) (map[string]string, []string, error) {
 	var after bool
 	var err error
 	execute := make(map[string]string)
@@ -403,7 +403,7 @@ func (ss *SuffixSnapshot) generateSaveExecute(kvs map[string]string, ts typeutil
 }
 
 // LoadWithPrefix loads keys with the provided prefix and returns values at the ts
-func (ss *SuffixSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) {
+func (ss *suffixSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]string, []string, error) {
 	// ts 0 case shall be treated as fetch latest/current value
 	if ts == 0 {
 		keys, values, err := ss.TxnKV.LoadWithPrefix(key)
@@ -484,7 +484,7 @@ func (ss *SuffixSnapshot) LoadWithPrefix(key string, ts typeutil.Timestamp) ([]s
 
 // MultiSaveAndRemoveWithPrefix saves multiple kvs and removes with prefix as well
 // if ts == 0, act like TxnKV
 // each key-value will be treated using the same logic as Save
-func (ss *SuffixSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
+func (ss *suffixSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
 	// if ts == 0, act like TxnKV
 	if ts == 0 {
 		return ss.TxnKV.MultiSaveAndRemoveWithPrefix(saves, removals)
@@ -503,15 +503,15 @@ func (ss *SuffixSnapshot) MultiSaveAndRemoveWithPrefix(saves map[string]string,
 	for _, removal := range removals {
 		keys, _, err := ss.TxnKV.LoadWithPrefix(removal)
 		if err != nil {
-			log.Warn("SuffixSnapshot TxnKV LoadwithPrefix failed", zap.String("key", removal), zap.Error(err))
+			log.Warn("suffixSnapshot TxnKV LoadwithPrefix failed", zap.String("key", removal), zap.Error(err))
 			return err
 		}
 
 		// add tombstone to original key and add ts entry
 		for _, key := range keys {
 			key = ss.hideRootPrefix(key)
-			execute[key] = string(SuffixSnapshotTombstone)
-			execute[ss.composeTSKey(key, ts)] = string(SuffixSnapshotTombstone)
+			execute[key] = string(suffixSnapshotTombstone)
+			execute[ss.composeTSKey(key, ts)] = string(suffixSnapshotTombstone)
 			updateList = append(updateList, key)
 		}
 	}
diff --git a/internal/metastore/kv/suffix_snapshot_test.go b/internal/rootcoord/suffix_snapshot_test.go
similarity index 95%
rename from internal/metastore/kv/suffix_snapshot_test.go
rename to internal/rootcoord/suffix_snapshot_test.go
index c1597e9d05..5847c226fc 100644
--- a/internal/metastore/kv/suffix_snapshot_test.go
+++ b/internal/rootcoord/suffix_snapshot_test.go
@@ -14,7 +14,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package kv
+package rootcoord
 
 import (
 	"fmt"
@@ -171,7 +171,7 @@ func Test_binarySearchRecords(t *testing.T) {
 
 func Test_ComposeIsTsKey(t *testing.T) {
 	sep := "_ts"
-	ss, err := NewSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
+	ss, err := newSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
 	require.Nil(t, err)
 	type testcase struct {
 		key string
@@ -209,7 +209,7 @@ func Test_ComposeIsTsKey(t *testing.T) {
 
 func Test_SuffixSnaphotIsTSOfKey(t *testing.T) {
 	sep := "_ts"
-	ss, err := NewSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
+	ss, err := newSuffixSnapshot((*etcdkv.EtcdKV)(nil), sep, "", snapshotPrefix)
 	require.Nil(t, err)
 	type testcase struct {
 		key string
@@ -274,7 +274,7 @@ func Test_SuffixSnapshotLoad(t *testing.T) {
 		return vtso
 	}
 
-	ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
+	ss, err := newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
 	assert.Nil(t, err)
 	assert.NotNil(t, ss)
 
@@ -295,7 +295,7 @@ func Test_SuffixSnapshotLoad(t *testing.T) {
 		assert.Nil(t, err)
 		assert.Equal(t, "value-19", val)
 
-	ss, err = NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
+	ss, err = newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
 	assert.Nil(t, err)
 	assert.NotNil(t, ss)
 
@@ -326,7 +326,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
 		return vtso
 	}
 
-	ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
+	ss, err := newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
 	assert.Nil(t, err)
 	assert.NotNil(t, ss)
 
@@ -358,7 +358,7 @@ func Test_SuffixSnapshotMultiSave(t *testing.T) {
 		assert.Equal(t, vals[0], "v1-19")
 		assert.Equal(t, vals[1], "v2-19")
 
-	ss, err = NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
+	ss, err = newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
 	assert.Nil(t, err)
 	assert.NotNil(t, ss)
 	for i := 0; i < 20; i++ {
@@ -403,7 +403,7 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
 		return vtso
 	}
 
-	ss, err := NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
+	ss, err := newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
 	assert.Nil(t, err)
 	assert.NotNil(t, ss)
 
@@ -440,7 +440,7 @@ func Test_SuffixSnapshotMultiSaveAndRemoveWithPrefix(t *testing.T) {
 		assert.Equal(t, 39-i, len(vals))
 	}
 
-	ss, err = NewSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
+	ss, err = newSuffixSnapshot(etcdkv, sep, rootPath, snapshotPrefix)
 	assert.Nil(t, err)
 	assert.NotNil(t, ss)
diff --git a/internal/rootcoord/task.go b/internal/rootcoord/task.go
index 1210f67e80..c5fbd201c7 100644
--- a/internal/rootcoord/task.go
+++ b/internal/rootcoord/task.go
@@ -24,7 +24,6 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/milvus-io/milvus/internal/common"
 	"github.com/milvus-io/milvus/internal/log"
-	model "github.com/milvus-io/milvus/internal/metastore/model"
 	"github.com/milvus-io/milvus/internal/proto/commonpb"
 	"github.com/milvus-io/milvus/internal/proto/etcdpb"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
@@ -156,6 +155,21 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
 		}
 	}
 
+	collInfo := etcdpb.CollectionInfo{
+		ID:                         collID,
+		Schema:                     &schema,
+		PartitionIDs:               []typeutil.UniqueID{partID},
+		PartitionNames:             []string{Params.CommonCfg.DefaultPartitionName},
+		FieldIndexes:               make([]*etcdpb.FieldIndexInfo, 0, 16),
+		VirtualChannelNames:        vchanNames,
+		PhysicalChannelNames:       chanNames,
+		ShardsNum:                  t.Req.ShardsNum,
+		PartitionCreatedTimestamps: []uint64{0},
+		ConsistencyLevel:           t.Req.ConsistencyLevel,
+	}
+
+	idxInfo := make([]*etcdpb.IndexInfo, 0, 16)
+
 	// schema is modified (add RowIDField and TimestampField),
 	// so need Marshal again
 	schemaBytes, err := proto.Marshal(&schema)
@@ -190,27 +204,6 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
 		return fmt.Errorf("encodeDdOperation fail, error = %w", err)
 	}
 
-	collInfo := model.Collection{
-		CollectionID:         collID,
-		Name:                 schema.Name,
-		Description:          schema.Description,
-		AutoID:               schema.AutoID,
-		Fields:               model.BatchConvertFieldPBToModel(schema.Fields),
-		VirtualChannelNames:  vchanNames,
-		PhysicalChannelNames: chanNames,
-		ShardsNum:            t.Req.ShardsNum,
-		ConsistencyLevel:     t.Req.ConsistencyLevel,
-		FieldIndexes:         make([]*model.Index, 0, 16),
-		CreateTime:           ts,
-		Partitions: []*model.Partition{
-			{
-				PartitionID:               partID,
-				PartitionName:             Params.CommonCfg.DefaultPartitionName,
-				PartitionCreatedTimestamp: ts,
-			},
-		},
-	}
-
 	// use lambda function here to guarantee all resources to be released
 	createCollectionFn := func() error {
 		// lock for ddl operation
@@ -239,7 +232,7 @@ func (t *CreateCollectionReqTask) Execute(ctx context.Context) error {
 		}
 
 		// update meta table after send dd operation
-		if err = t.core.MetaTable.AddCollection(&collInfo, ts, ddOpStr); err != nil {
+		if err = t.core.MetaTable.AddCollection(&collInfo, ts, idxInfo, ddOpStr); err != nil {
 			t.core.chanTimeTick.removeDmlChannels(chanNames...)
 			t.core.chanTimeTick.removeDeltaChannels(deltaChanNames...)
 			// it's ok just to leave create collection message sent, datanode and querynode doesn't process CreateCollection logic
@@ -297,17 +290,17 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
 		DbName:         t.Req.DbName,
 		CollectionName: t.Req.CollectionName,
 		DbID:           0, //not used
-		CollectionID:   collMeta.CollectionID,
+		CollectionID:   collMeta.ID,
 	}
 
-	reason := fmt.Sprintf("drop collection %d", collMeta.CollectionID)
+	reason := fmt.Sprintf("drop collection %d", collMeta.ID)
 	ts, err := t.core.TSOAllocator(1)
 	if err != nil {
 		return fmt.Errorf("TSO alloc fail, error = %w", err)
 	}
 
 	//notify query service to release collection
-	if err = t.core.CallReleaseCollectionService(t.core.ctx, ts, 0, collMeta.CollectionID); err != nil {
+	if err = t.core.CallReleaseCollectionService(t.core.ctx, ts, 0, collMeta.ID); err != nil {
 		log.Error("Failed to CallReleaseCollectionService", zap.Error(err))
 		return err
 	}
@@ -346,7 +339,7 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
 	}
 
 	// update meta table after send dd operation
-	if err = t.core.MetaTable.DeleteCollection(collMeta.CollectionID, ts, ddOpStr); err != nil {
+	if err = t.core.MetaTable.DeleteCollection(collMeta.ID, ts, ddOpStr); err != nil {
 		return err
 	}
 
@@ -380,7 +373,7 @@ func (t *DropCollectionReqTask) Execute(ctx context.Context) error {
 	}
 
 	// invalidate all the collection meta cache with the specified collectionID
-	err = t.core.ExpireMetaCache(ctx, nil, collMeta.CollectionID, ts)
+	err = t.core.ExpireMetaCache(ctx, nil, collMeta.ID, ts)
 	if err != nil {
 		return err
 	}
@@ -432,7 +425,7 @@ func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error {
 	if t.Type() != commonpb.MsgType_DescribeCollection {
 		return fmt.Errorf("describe collection, msg type = %s", commonpb.MsgType_name[int32(t.Type())])
 	}
-	var collInfo *model.Collection
+	var collInfo *etcdpb.CollectionInfo
 	var err error
 
 	if t.Req.CollectionName != "" {
@@ -447,13 +440,8 @@ func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error {
 		}
 	}
 
-	t.Rsp.Schema = &schemapb.CollectionSchema{
-		Name:        collInfo.Name,
-		Description: collInfo.Description,
-		AutoID:      collInfo.AutoID,
-		Fields:      model.BatchConvertToFieldSchemaPB(collInfo.Fields),
-	}
-	t.Rsp.CollectionID = collInfo.CollectionID
+	t.Rsp.Schema = proto.Clone(collInfo.Schema).(*schemapb.CollectionSchema)
+	t.Rsp.CollectionID = collInfo.ID
 	t.Rsp.VirtualChannelNames = collInfo.VirtualChannelNames
 	t.Rsp.PhysicalChannelNames = collInfo.PhysicalChannelNames
 	if collInfo.ShardsNum == 0 {
@@ -465,8 +453,8 @@ func (t *DescribeCollectionReqTask) Execute(ctx context.Context) error {
 	t.Rsp.CreatedTimestamp = collInfo.CreateTime
 	createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime)
 	t.Rsp.CreatedUtcTimestamp = uint64(createdPhysicalTime)
-	t.Rsp.Aliases = t.core.MetaTable.ListAliases(collInfo.CollectionID)
-	t.Rsp.StartPositions = collInfo.StartPositions
+	t.Rsp.Aliases = t.core.MetaTable.ListAliases(collInfo.ID)
+	t.Rsp.StartPositions = collInfo.GetStartPositions()
 	t.Rsp.CollectionName = t.Rsp.Schema.Name
 	return nil
 }
@@ -494,7 +482,7 @@ func (t *ShowCollectionReqTask) Execute(ctx context.Context) error {
 	}
 	for name, meta := range coll {
 		t.Rsp.CollectionNames = append(t.Rsp.CollectionNames, name)
-		t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.CollectionID)
+		t.Rsp.CollectionIds = append(t.Rsp.CollectionIds, meta.ID)
 		t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, meta.CreateTime)
 		physical, _ := tsoutil.ParseHybridTs(meta.CreateTime)
 		t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical))
@@ -533,7 +521,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
 		CollectionName: t.Req.CollectionName,
 		PartitionName:  t.Req.PartitionName,
 		DbID:           0, // todo, not used
-		CollectionID:   collMeta.CollectionID,
+		CollectionID:   collMeta.ID,
 		PartitionID:    partID,
 	}
 
@@ -566,7 +554,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
 	}
 
 	// update meta table after send dd operation
-	if err = t.core.MetaTable.AddPartition(collMeta.CollectionID, t.Req.PartitionName, partID, ts, ddOpStr); err != nil {
+	if err = t.core.MetaTable.AddPartition(collMeta.ID, t.Req.PartitionName, partID, ts, ddOpStr); err != nil {
 		return err
 	}
 
@@ -584,7 +572,7 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
 	}
 
 	// invalidate all the collection meta cache with the specified collectionID
-	err = t.core.ExpireMetaCache(ctx, nil, collMeta.CollectionID, ts)
+	err = t.core.ExpireMetaCache(ctx, nil, collMeta.ID, ts)
 	if err != nil {
 		return err
 	}
@@ -613,7 +601,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	partID, err := t.core.MetaTable.GetPartitionByName(collInfo.CollectionID, t.Req.PartitionName, 0)
+	partID, err := t.core.MetaTable.GetPartitionByName(collInfo.ID, t.Req.PartitionName, 0)
 	if err != nil {
 		return err
 	}
@@ -624,7 +612,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
 		CollectionName: t.Req.CollectionName,
 		PartitionName:  t.Req.PartitionName,
 		DbID:           0, //todo,not used
-		CollectionID:   collInfo.CollectionID,
+		CollectionID:   collInfo.ID,
 		PartitionID:    partID,
 	}
 
@@ -657,7 +645,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
 	}
 
 	// update meta table after send dd operation
-	if _, err = t.core.MetaTable.DeletePartition(collInfo.CollectionID, t.Req.PartitionName, ts, ddOpStr); err != nil {
+	if _, err = t.core.MetaTable.DeletePartition(collInfo.ID, t.Req.PartitionName, ts, ddOpStr); err != nil {
 		return err
 	}
 
@@ -675,7 +663,7 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
 	}
 
 	// invalidate all the collection meta cache with the specified collectionID
-	err = t.core.ExpireMetaCache(ctx, nil, collInfo.CollectionID, ts)
+	err = t.core.ExpireMetaCache(ctx, nil, collInfo.ID, ts)
 	if err != nil {
 		return err
 	}
@@ -712,7 +700,7 @@ func (t *HasPartitionReqTask) Execute(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	t.HasPartition = t.core.MetaTable.HasPartition(coll.CollectionID, t.Req.PartitionName, 0)
+	t.HasPartition = t.core.MetaTable.HasPartition(coll.ID, t.Req.PartitionName, 0)
 	return nil
 }
@@ -733,7 +721,7 @@ func (t *ShowPartitionReqTask) Execute(ctx context.Context) error {
 	if t.Type() != commonpb.MsgType_ShowPartitions {
 		return fmt.Errorf("show partition, msg type = %s", commonpb.MsgType_name[int32(t.Type())])
 	}
-	var coll *model.Collection
+	var coll *etcdpb.CollectionInfo
 	var err error
 	if t.Req.CollectionName == "" {
 		coll, err = t.core.MetaTable.GetCollectionByID(t.Req.CollectionID, 0)
@@ -743,13 +731,12 @@ func (t *ShowPartitionReqTask) Execute(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-
-	for _, part := range coll.Partitions {
-		t.Rsp.PartitionIDs = append(t.Rsp.PartitionIDs, part.PartitionID)
-		t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, part.PartitionName)
-		t.Rsp.CreatedTimestamps = append(t.Rsp.CreatedTimestamps, part.PartitionCreatedTimestamp)
-
-		physical, _ := tsoutil.ParseHybridTs(part.PartitionCreatedTimestamp)
+	t.Rsp.PartitionIDs = coll.PartitionIDs
+	t.Rsp.PartitionNames = coll.PartitionNames
+	t.Rsp.CreatedTimestamps = coll.PartitionCreatedTimestamps
+	t.Rsp.CreatedUtcTimestamps = make([]uint64, 0, len(coll.PartitionCreatedTimestamps))
+	for _, ts := range coll.PartitionCreatedTimestamps {
+		physical, _ := tsoutil.ParseHybridTs(ts)
 		t.Rsp.CreatedUtcTimestamps = append(t.Rsp.CreatedUtcTimestamps, uint64(physical))
 	}
 
@@ -780,7 +767,7 @@ func (t *DescribeSegmentReqTask) Execute(ctx context.Context) error {
 
 	segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, -1)
 	if err != nil {
-		log.Debug("Get flushed segment from data coord failed", zap.String("collection_name", coll.Name), zap.Error(err))
+		log.Debug("Get flushed segment from data coord failed", zap.String("collection_name", coll.Schema.Name), zap.Error(err))
 		return err
 	}
 
@@ -796,16 +783,16 @@ func (t *DescribeSegmentReqTask) Execute(ctx context.Context) error {
 		return fmt.Errorf("segment id %d not belong to collection id %d", t.Req.SegmentID, t.Req.CollectionID)
 	}
 	//TODO, get filed_id and index_name from request
-	index, err := t.core.MetaTable.GetSegmentIndexInfoByID(t.Req.SegmentID, -1, "")
+	segIdxInfo, err := t.core.MetaTable.GetSegmentIndexInfoByID(t.Req.SegmentID, -1, "")
 	log.Debug("RootCoord DescribeSegmentReqTask, MetaTable.GetSegmentIndexInfoByID", zap.Any("SegmentID", t.Req.SegmentID),
-		zap.Any("index", index), zap.Error(err))
+		zap.Any("segIdxInfo", segIdxInfo), zap.Error(err))
 	if err != nil {
 		return err
 	}
 
-	t.Rsp.IndexID = index.IndexID
-	t.Rsp.BuildID = index.SegmentIndexes[t.Req.SegmentID].BuildID
-	t.Rsp.EnableIndex = index.SegmentIndexes[t.Req.SegmentID].EnableIndex
-	t.Rsp.FieldID = index.FieldID
+	t.Rsp.IndexID = segIdxInfo.IndexID
+	t.Rsp.BuildID = segIdxInfo.BuildID
+	t.Rsp.EnableIndex = segIdxInfo.EnableIndex
+	t.Rsp.FieldID = segIdxInfo.FieldID
 	return nil
 }
@@ -831,8 +818,8 @@ func (t *ShowSegmentReqTask) Execute(ctx context.Context) error {
 		return err
 	}
 	exist := false
-	for _, partition := range coll.Partitions {
-		if partition.PartitionID == t.Req.PartitionID {
+	for _, partID := range coll.PartitionIDs {
+		if partID == t.Req.PartitionID {
 			exist = true
 			break
 		}
@@ -842,7 +829,7 @@ func (t *ShowSegmentReqTask) Execute(ctx context.Context) error {
 	}
 	segIDs, err := t.core.CallGetFlushedSegmentsService(ctx, t.Req.CollectionID, t.Req.PartitionID)
 	if err != nil {
-		log.Debug("Get flushed segments from data coord failed", zap.String("collection name", coll.Name), zap.Int64("partition id", t.Req.PartitionID), zap.Error(err))
+		log.Debug("Get flushed segments from data coord failed", zap.String("collection name", coll.Schema.Name), zap.Int64("partition id", t.Req.PartitionID), zap.Error(err))
 		return err
 	}
 
@@ -899,26 +886,23 @@ func (t *DescribeSegmentsReqTask) Execute(ctx context.Context) error {
 			}
 		}
 
-		index, err := t.core.MetaTable.GetSegmentIndexInfos(segID)
+		segmentInfo, err := t.core.MetaTable.GetSegmentIndexInfos(segID)
 		if err != nil {
 			continue
 		}
 
-		for indexID, indexInfo := range index {
-			for _, segmentIndex := range indexInfo.SegmentIndexes {
-				t.Rsp.SegmentInfos[segID].IndexInfos =
-					append(t.Rsp.SegmentInfos[segID].IndexInfos,
-						&etcdpb.SegmentIndexInfo{
-							CollectionID: indexInfo.CollectionID,
-							PartitionID:  segmentIndex.Segment.PartitionID,
-							SegmentID:    segmentIndex.Segment.SegmentID,
-							FieldID:      indexInfo.FieldID,
-							IndexID:      indexInfo.IndexID,
-							BuildID:      segmentIndex.BuildID,
-							EnableIndex:  segmentIndex.EnableIndex,
-						})
-			}
-
+		for indexID, indexInfo := range segmentInfo {
+			t.Rsp.SegmentInfos[segID].IndexInfos =
+				append(t.Rsp.SegmentInfos[segID].IndexInfos,
+					&etcdpb.SegmentIndexInfo{
+						CollectionID: indexInfo.CollectionID,
+						PartitionID:  indexInfo.PartitionID,
+						SegmentID:    indexInfo.SegmentID,
+						FieldID:      indexInfo.FieldID,
+						IndexID:      indexInfo.IndexID,
+						BuildID:      indexInfo.BuildID,
+						EnableIndex:  indexInfo.EnableIndex,
+					})
 			extraIndexInfo, err := t.core.MetaTable.GetIndexByID(indexID)
 			if err != nil {
 				log.Error("index not found in meta table",
@@ -928,7 +912,7 @@ func (t *DescribeSegmentsReqTask) Execute(ctx context.Context) error {
 					zap.Int64("segment", segID))
 				return err
 			}
-			t.Rsp.SegmentInfos[segID].ExtraIndexInfos[indexID] = model.ConvertToIndexPB(extraIndexInfo)
+			t.Rsp.SegmentInfos[segID].ExtraIndexInfos[indexID] = extraIndexInfo
 		}
 	}
 
@@ -960,7 +944,7 @@ func (t *CreateIndexReqTask) Execute(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	idxInfo := &model.Index{
+	idxInfo := &etcdpb.IndexInfo{
 		IndexName:   indexName,
 		IndexID:     indexID,
 		IndexParams: t.Req.ExtraParams,
@@ -975,54 +959,45 @@ func (t *CreateIndexReqTask) Execute(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	segID2PartID, segID2Binlog, err := t.core.getSegments(ctx, collMeta.CollectionID)
+	segID2PartID, segID2Binlog, err := t.core.getSegments(ctx, collMeta.ID)
 	flushedSegs := make([]typeutil.UniqueID, 0, len(segID2PartID))
 	for k := range segID2PartID {
 		flushedSegs = append(flushedSegs, k)
 	}
 	if err != nil {
-		log.Debug("get flushed segments from data coord failed", zap.String("collection_name", collMeta.Name), zap.Error(err))
+		log.Debug("Get flushed segments from data coord failed", zap.String("collection_name", collMeta.Schema.Name), zap.Error(err))
 		return err
 	}
 
 	segIDs, field, err := t.core.MetaTable.GetNotIndexedSegments(t.Req.CollectionName, t.Req.FieldName, idxInfo, flushedSegs)
 	if err != nil {
-		log.Debug("get not indexed segments failed", zap.Int64("collection_id", collMeta.CollectionID), zap.Error(err))
+		log.Debug("RootCoord CreateIndexReqTask metaTable.GetNotIndexedSegments", zap.Error(err))
 		return err
 	}
 
-	if err := t.core.MetaTable.AddIndex(t.Req.CollectionName, t.Req.FieldName, idxInfo, segIDs); err != nil {
-		log.Debug("add index into metastore failed", zap.Int64("collection_id", collMeta.CollectionID), zap.Int64("index_id", idxInfo.IndexID), zap.Error(err))
-		return err
-	}
+	collectionID := collMeta.ID
+	cnt := 0
 
 	for _, segID := range segIDs {
-		segmentIndex := model.SegmentIndex{
-			Segment: model.Segment{
-				SegmentID:   segID,
-				PartitionID: segID2PartID[segID],
-			},
-			EnableIndex: false,
+		info := etcdpb.SegmentIndexInfo{
+			CollectionID: collectionID,
+			PartitionID:  segID2PartID[segID],
+			SegmentID:    segID,
+			FieldID:      field.FieldID,
+			IndexID:      idxInfo.IndexID,
+			EnableIndex:  false,
 		}
-
-		segmentIndex.BuildID, err = t.core.BuildIndex(ctx, segID, segID2Binlog[segID].GetNumOfRows(), segID2Binlog[segID].GetFieldBinlogs(), &field, idxInfo, false)
+		info.BuildID, err = t.core.BuildIndex(ctx, segID, segID2Binlog[segID].GetNumOfRows(), segID2Binlog[segID].GetFieldBinlogs(), &field, idxInfo, false)
 		if err != nil {
 			return err
 		}
-		if segmentIndex.BuildID != 0 {
-			segmentIndex.EnableIndex = true
+		if info.BuildID != 0 {
+			info.EnableIndex = true
 		}
-
-		index := &model.Index{
-			CollectionID:   collMeta.CollectionID,
-			FieldID:        field.FieldID,
-			IndexID:        idxInfo.IndexID,
-			SegmentIndexes: map[int64]model.SegmentIndex{segID: segmentIndex},
-		}
-
-		if err := t.core.MetaTable.AlterIndex(index); err != nil {
-			log.Debug("alter index into meta table failed", zap.Int64("collection_id", collMeta.CollectionID), zap.Int64("index_id", index.IndexID), zap.Int64("build_id", segmentIndex.BuildID), zap.Error(err))
+		if err := t.core.MetaTable.AddIndex(&info); err != nil {
+			log.Debug("Add index into meta table failed", zap.Int64("collection_id", collMeta.ID), zap.Int64("index_id", info.IndexID), zap.Int64("build_id", info.BuildID), zap.Error(err))
 		}
+		cnt++
 	}
 
 	return nil
diff --git a/internal/rootcoord/task_test.go b/internal/rootcoord/task_test.go
index 265285ae5a..a685482cd3 100644
--- a/internal/rootcoord/task_test.go
+++ b/internal/rootcoord/task_test.go
@@ -5,11 +5,13 @@ import (
 	"errors"
 	"testing"
 
-	"github.com/milvus-io/milvus/internal/metastore/model"
+	"github.com/stretchr/testify/assert"
+
+	"github.com/milvus-io/milvus/internal/proto/etcdpb"
+	"github.com/milvus-io/milvus/internal/util/typeutil"
+
 	"github.com/milvus-io/milvus/internal/proto/commonpb"
 	"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
-	"github.com/milvus-io/milvus/internal/util/typeutil"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestDescribeSegmentReqTask_Type(t *testing.T) {
@@ -64,28 +66,22 @@ func TestDescribeSegmentsReqTask_Execute(t *testing.T) {
 		return []typeutil.UniqueID{segID}, nil
 	}
 	c.MetaTable = &MetaTable{
-		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index{},
+		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{},
 	}
 	assert.NoError(t, tsk.Execute(context.Background()))
 
 	// index not found in meta.
 	c.MetaTable = &MetaTable{
-		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index{
+		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
 			segID: {
 				indexID: {
 					CollectionID: collID,
+					PartitionID:  partID,
+					SegmentID:    segID,
 					FieldID:      fieldID,
 					IndexID:      indexID,
-					SegmentIndexes: map[int64]model.SegmentIndex{
-						segID: {
-							Segment: model.Segment{
-								SegmentID:   segID,
-								PartitionID: partID,
-							},
-							BuildID:     buildID,
-							EnableIndex: true,
-						},
-					},
+					BuildID:     buildID,
+					EnableIndex: true,
 				},
 			},
 		},
@@ -94,26 +90,20 @@
 	// success.
 	c.MetaTable = &MetaTable{
-		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]model.Index{
+		segID2IndexMeta: map[typeutil.UniqueID]map[typeutil.UniqueID]etcdpb.SegmentIndexInfo{
 			segID: {
 				indexID: {
 					CollectionID: collID,
+					PartitionID:  partID,
+					SegmentID:    segID,
 					FieldID:      fieldID,
 					IndexID:      indexID,
-					SegmentIndexes: map[int64]model.SegmentIndex{
-						segID: {
-							Segment: model.Segment{
-								SegmentID:   segID,
-								PartitionID: partID,
-							},
-							BuildID:     buildID,
-							EnableIndex: true,
-						},
-					},
+					BuildID:     buildID,
+					EnableIndex: true,
 				},
 			},
 		},
-		indexID2Meta: map[typeutil.UniqueID]model.Index{
+		indexID2Meta: map[typeutil.UniqueID]etcdpb.IndexInfo{
 			indexID: {
 				IndexName: indexName,
 				IndexID:   indexID,
diff --git a/internal/rootcoord/util.go b/internal/rootcoord/util.go
index 03ab4b2208..79019074f2 100644
--- a/internal/rootcoord/util.go
+++ b/internal/rootcoord/util.go
@@ -20,11 +20,11 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/milvus-io/milvus/internal/metastore/model"
-
 	"github.com/golang/protobuf/proto"
 	"github.com/milvus-io/milvus/internal/mq/msgstream"
 	"github.com/milvus-io/milvus/internal/proto/commonpb"
+	"github.com/milvus-io/milvus/internal/proto/etcdpb"
+	"github.com/milvus-io/milvus/internal/proto/schemapb"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
 )
@@ -50,8 +50,8 @@ func EqualKeyPairArray(p1 []*commonpb.KeyValuePair, p2 []*commonpb.KeyValuePair)
 }
 
 // GetFieldSchemaByID returns field schema by id
-func GetFieldSchemaByID(coll *model.Collection, fieldID typeutil.UniqueID) (*model.Field, error) {
-	for _, f := range coll.Fields {
+func GetFieldSchemaByID(coll *etcdpb.CollectionInfo, fieldID typeutil.UniqueID) (*schemapb.FieldSchema, error) {
+	for _, f := range coll.Schema.Fields {
 		if f.FieldID == fieldID {
 			return f, nil
 		}
@@ -60,12 +60,12 @@ func GetFieldSchemaByID(coll *model.Collection, fieldID typeutil.UniqueID) (*mod
 }
 
 // GetFieldSchemaByIndexID returns field schema by its index id
-func GetFieldSchemaByIndexID(coll *model.Collection, idxID typeutil.UniqueID) (*model.Field, error) {
+func GetFieldSchemaByIndexID(coll *etcdpb.CollectionInfo, idxID typeutil.UniqueID) (*schemapb.FieldSchema, error) {
 	var fieldID typeutil.UniqueID
 	exist := false
 	for _, f := range coll.FieldIndexes {
 		if f.IndexID == idxID {
-			fieldID = f.FieldID
+			fieldID = f.FiledID
 			exist = true
 			break
 		}
@@ -98,6 +98,16 @@ func DecodeDdOperation(str string, ddOp *DdOperation) error {
 	return json.Unmarshal([]byte(str), ddOp)
 }
 
+// SegmentIndexInfoEqual returns true if two SegmentIndexInfos are identical
+func SegmentIndexInfoEqual(info1 *etcdpb.SegmentIndexInfo, info2 *etcdpb.SegmentIndexInfo) bool {
+	return info1.CollectionID == info2.CollectionID &&
+		info1.PartitionID == info2.PartitionID &&
+		info1.SegmentID == info2.SegmentID &&
+		info1.FieldID == info2.FieldID &&
+		info1.IndexID == info2.IndexID &&
+		info1.EnableIndex == info2.EnableIndex
+}
+
 // EncodeMsgPositions serialize []*MsgPosition into string
 func EncodeMsgPositions(msgPositions []*msgstream.MsgPosition) (string, error) {
 	if len(msgPositions) == 0 {
diff --git a/internal/rootcoord/util_test.go b/internal/rootcoord/util_test.go
index 82c4ca5d6d..82d98b1cf5 100644
--- a/internal/rootcoord/util_test.go
+++ b/internal/rootcoord/util_test.go
@@ -19,10 +19,10 @@ package rootcoord
 import (
 	"testing"
 
-	"github.com/milvus-io/milvus/internal/metastore/model"
-
 	"github.com/milvus-io/milvus/internal/mq/msgstream"
 	"github.com/milvus-io/milvus/internal/proto/commonpb"
+	"github.com/milvus-io/milvus/internal/proto/etcdpb"
+	"github.com/milvus-io/milvus/internal/proto/schemapb"
 	"github.com/stretchr/testify/assert"
 )
@@ -60,10 +60,12 @@ func Test_EqualKeyPairArray(t *testing.T) {
 }
 
 func Test_GetFieldSchemaByID(t *testing.T) {
-	coll := &model.Collection{
-		Fields: []*model.Field{
-			{
-				FieldID: 1,
+	coll := &etcdpb.CollectionInfo{
+		Schema: &schemapb.CollectionSchema{
+			Fields: []*schemapb.FieldSchema{
+				{
+					FieldID: 1,
+				},
 			},
 		},
 	}
@@ -74,15 +76,17 @@ func Test_GetFieldSchemaByID(t *testing.T) {
 }
 
 func Test_GetFieldSchemaByIndexID(t *testing.T) {
-	coll := &model.Collection{
-		Fields: []*model.Field{
-			{
-				FieldID: 1,
+	coll := &etcdpb.CollectionInfo{
+		Schema: &schemapb.CollectionSchema{
+			Fields: []*schemapb.FieldSchema{
+				{
+					FieldID: 1,
+				},
 			},
 		},
-		FieldIndexes: []*model.Index{
+		FieldIndexes: []*etcdpb.FieldIndexInfo{
 			{
-				FieldID: 1,
+				FiledID: 1,
 				IndexID: 2,
 			},
 		},