enhance: make index meta use an independent lock rather than the global meta lock (#30869)

issue: https://github.com/milvus-io/milvus/issues/30837

Signed-off-by: jaime <yun.zhang@zilliz.com>
Authored by jaime on 2024-03-04 16:56:59 +08:00, committed by GitHub
parent 3dc5e38240
commit 4b0c3dd377
23 changed files with 3020 additions and 2800 deletions
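
The gist of the change: index bookkeeping moves out of the global meta struct into a dedicated indexMeta struct that carries its own lock, so index reads and writes stop contending on meta's global mutex. Below is a minimal sketch of that pattern in Go. The names indexMeta, segmentIndexes, GetSegmentIndexes, and updateSegmentIndex come from the diffs that follow, but the placeholder types and the lock/copy details are assumptions for illustration, not the committed implementation.

package datacoord // hypothetical placement; a standalone sketch only

import "sync"

type UniqueID = int64

// SegmentIndex stands in for model.SegmentIndex; only a few fields are shown.
type SegmentIndex struct {
	SegmentID, CollectionID, IndexID, BuildID UniqueID
	CurrentIndexVersion                       int32
	IndexFileKeys                             []string
}

// indexMeta owns all segment-index state behind its own RWMutex,
// independent of meta's global lock (the point of this PR).
type indexMeta struct {
	sync.RWMutex
	// segmentID -> indexID -> segment index
	segmentIndexes map[UniqueID]map[UniqueID]*SegmentIndex
}

// GetSegmentIndexes copies one segment's index entries while holding only
// the indexMeta read lock (collectionID kept for parity with the diff).
func (m *indexMeta) GetSegmentIndexes(collectionID, segID UniqueID) map[UniqueID]*SegmentIndex {
	m.RLock()
	defer m.RUnlock()
	ret := make(map[UniqueID]*SegmentIndex, len(m.segmentIndexes[segID]))
	for indexID, segIdx := range m.segmentIndexes[segID] {
		ret[indexID] = segIdx
	}
	return ret
}

// updateSegmentIndex inserts or overwrites one entry under the write lock.
func (m *indexMeta) updateSegmentIndex(segIdx *SegmentIndex) {
	m.Lock()
	defer m.Unlock()
	if m.segmentIndexes[segIdx.SegmentID] == nil {
		m.segmentIndexes[segIdx.SegmentID] = make(map[UniqueID]*SegmentIndex)
	}
	m.segmentIndexes[segIdx.SegmentID][segIdx.IndexID] = segIdx
}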

View File

@@ -318,7 +318,7 @@ func (t *compactionTrigger) updateSegmentMaxSize(segments []*SegmentInfo) (bool,
 }
 collectionID := segments[0].GetCollectionID()
-indexInfos := t.meta.GetIndexesForCollection(segments[0].GetCollectionID(), "")
+indexInfos := t.meta.indexMeta.GetIndexesForCollection(segments[0].GetCollectionID(), "")
 ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 defer cancel()
@@ -945,7 +945,8 @@ func (t *compactionTrigger) ShouldDoSingleCompaction(segment *SegmentInfo, isDis
 if Params.DataCoordCfg.AutoUpgradeSegmentIndex.GetAsBool() {
 // index version of segment lower than current version and IndexFileKeys should have value, trigger compaction
-for _, index := range segment.segmentIndexes {
+indexIDToSegIdxes := t.meta.indexMeta.GetSegmentIndexes(segment.CollectionID, segment.ID)
+for _, index := range indexIDToSegIdxes {
 if index.CurrentIndexVersion < t.indexEngineVersionManager.GetCurrentIndexEngineVersion() &&
 len(index.IndexFileKeys) > 0 {
 log.Info("index version is too old, trigger compaction",

View File

@@ -141,25 +141,6 @@ func Test_compactionTrigger_force(t *testing.T) {
 },
 },
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-indexID: {
-SegmentID: 1,
-CollectionID: 2,
-PartitionID: 1,
-NumRows: 100,
-IndexID: indexID,
-BuildID: 1,
-NodeID: 0,
-IndexVersion: 1,
-IndexState: commonpb.IndexState_Finished,
-FailReason: "",
-IsDeleted: false,
-CreateTime: 0,
-IndexFileKeys: nil,
-IndexSize: 0,
-WriteHandoff: false,
-},
-},
 },
 2: {
 SegmentInfo: &datapb.SegmentInfo{
@@ -186,7 +167,43 @@ func Test_compactionTrigger_force(t *testing.T) {
 },
 },
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
+},
+3: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: 3,
+CollectionID: 1111,
+PartitionID: 1,
+LastExpireTime: 100,
+NumOfRows: 100,
+MaxRowNum: 300,
+InsertChannel: "ch1",
+State: commonpb.SegmentState_Flushed,
+},
+},
+},
+},
+indexMeta: &indexMeta{
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+1: {
+indexID: {
+SegmentID: 1,
+CollectionID: 2,
+PartitionID: 1,
+NumRows: 100,
+IndexID: indexID,
+BuildID: 1,
+NodeID: 0,
+IndexVersion: 1,
+IndexState: commonpb.IndexState_Finished,
+FailReason: "",
+IsDeleted: false,
+CreateTime: 0,
+IndexFileKeys: nil,
+IndexSize: 0,
+WriteHandoff: false,
+},
+},
+2: {
 indexID: {
 SegmentID: 2,
 CollectionID: 2,
@@ -205,19 +222,7 @@ func Test_compactionTrigger_force(t *testing.T) {
 WriteHandoff: false,
 },
 },
-},
 3: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: 3,
-CollectionID: 1111,
-PartitionID: 1,
-LastExpireTime: 100,
-NumOfRows: 100,
-MaxRowNum: 300,
-InsertChannel: "ch1",
-State: commonpb.SegmentState_Flushed,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: 3,
 CollectionID: 1111,
@@ -237,6 +242,47 @@ func Test_compactionTrigger_force(t *testing.T) {
 },
 },
 },
+indexes: map[UniqueID]map[UniqueID]*model.Index{
+2: {
+indexID: {
+TenantID: "",
+CollectionID: 2,
+FieldID: vecFieldID,
+IndexID: indexID,
+IndexName: "_default_idx",
+IsDeleted: false,
+CreateTime: 0,
+TypeParams: nil,
+IndexParams: []*commonpb.KeyValuePair{
+{
+Key: common.IndexTypeKey,
+Value: "HNSW",
+},
+},
+IsAutoIndex: false,
+UserIndexParams: nil,
+},
+},
+1000: {
+indexID: {
+TenantID: "",
+CollectionID: 1000,
+FieldID: vecFieldID,
+IndexID: indexID,
+IndexName: "_default_idx",
+IsDeleted: false,
+CreateTime: 0,
+TypeParams: nil,
+IndexParams: []*commonpb.KeyValuePair{
+{
+Key: common.IndexTypeKey,
+Value: "DISKANN",
+},
+},
+IsAutoIndex: false,
+UserIndexParams: nil,
+},
+},
+},
 },
 },
 collections: map[int64]*collectionInfo{
@@ -360,48 +406,6 @@ func Test_compactionTrigger_force(t *testing.T) {
 },
 },
 },
-indexes: map[UniqueID]map[UniqueID]*model.Index{
-2: {
-indexID: {
-TenantID: "",
-CollectionID: 2,
-FieldID: vecFieldID,
-IndexID: indexID,
-IndexName: "_default_idx",
-IsDeleted: false,
-CreateTime: 0,
-TypeParams: nil,
-IndexParams: []*commonpb.KeyValuePair{
-{
-Key: common.IndexTypeKey,
-Value: "HNSW",
-},
-},
-IsAutoIndex: false,
-UserIndexParams: nil,
-},
-},
-1000: {
-indexID: {
-TenantID: "",
-CollectionID: 1000,
-FieldID: vecFieldID,
-IndexID: indexID,
-IndexName: "_default_idx",
-IsDeleted: false,
-CreateTime: 0,
-TypeParams: nil,
-IndexParams: []*commonpb.KeyValuePair{
-{
-Key: common.IndexTypeKey,
-Value: "DISKANN",
-},
-},
-IsAutoIndex: false,
-UserIndexParams: nil,
-},
-},
-},
 },
 &MockAllocator0{},
 nil,
@@ -645,6 +649,31 @@ func Test_compactionTrigger_force_maxSegmentLimit(t *testing.T) {
 segmentInfos := &SegmentsInfo{
 segments: make(map[UniqueID]*SegmentInfo),
 }
+indexMeta := newSegmentIndexMeta(nil)
+indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{
+2: {
+indexID: {
+TenantID: "",
+CollectionID: 2,
+FieldID: vecFieldID,
+IndexID: indexID,
+IndexName: "_default_idx",
+IsDeleted: false,
+CreateTime: 0,
+TypeParams: nil,
+IndexParams: []*commonpb.KeyValuePair{
+{
+Key: common.IndexTypeKey,
+Value: "HNSW",
+},
+},
+IsAutoIndex: false,
+UserIndexParams: nil,
+},
+},
+}
 for i := UniqueID(0); i < 50; i++ {
 info := &SegmentInfo{
 SegmentInfo: &datapb.SegmentInfo{
@@ -671,8 +700,9 @@ func Test_compactionTrigger_force_maxSegmentLimit(t *testing.T) {
 },
 },
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-indexID: {
+}
+indexMeta.updateSegmentIndex(&model.SegmentIndex{
 SegmentID: i,
 CollectionID: 2,
 PartitionID: 1,
@@ -682,9 +712,8 @@ func Test_compactionTrigger_force_maxSegmentLimit(t *testing.T) {
 NodeID: 0,
 IndexVersion: 1,
 IndexState: commonpb.IndexState_Finished,
-},
-},
-}
+})
 segmentInfos.segments[i] = info
 }
@@ -719,28 +748,7 @@ func Test_compactionTrigger_force_maxSegmentLimit(t *testing.T) {
 },
 },
 },
-indexes: map[UniqueID]map[UniqueID]*model.Index{
-2: {
-indexID: {
-TenantID: "",
-CollectionID: 2,
-FieldID: vecFieldID,
-IndexID: indexID,
-IndexName: "_default_idx",
-IsDeleted: false,
-CreateTime: 0,
-TypeParams: nil,
-IndexParams: []*commonpb.KeyValuePair{
-{
-Key: common.IndexTypeKey,
-Value: "HNSW",
-},
-},
-IsAutoIndex: false,
-UserIndexParams: nil,
-},
-},
-},
+indexMeta: indexMeta,
 },
 newMockAllocator(),
 nil,
@@ -860,6 +868,7 @@ func Test_compactionTrigger_noplan(t *testing.T) {
 "test no plan",
 fields{
 &meta{
+indexMeta: newSegmentIndexMeta(nil),
 // 4 segment
 segments: &SegmentsInfo{
 map[int64]*SegmentInfo{
@@ -1047,53 +1056,37 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
 1: {
 SegmentInfo: genSeg(1, 20),
 lastFlushTime: time.Now().Add(-100 * time.Minute),
-segmentIndexes: genSegIndex(1, indexID, 20),
 },
 2: {
 SegmentInfo: genSeg(2, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(2, indexID, 20),
 },
 3: {
 SegmentInfo: genSeg(3, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(3, indexID, 20),
 },
 4: {
 SegmentInfo: genSeg(4, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(4, indexID, 20),
 },
 5: {
 SegmentInfo: genSeg(5, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(5, indexID, 20),
 },
 6: {
 SegmentInfo: genSeg(6, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(6, indexID, 20),
-},
-},
-},
-collections: map[int64]*collectionInfo{
-2: {
-ID: 2,
-Schema: &schemapb.CollectionSchema{
-Fields: []*schemapb.FieldSchema{
-{
-FieldID: vecFieldID,
-DataType: schemapb.DataType_FloatVector,
-TypeParams: []*commonpb.KeyValuePair{
-{
-Key: common.DimKey,
-Value: "128",
-},
-},
-},
-},
-},
 },
 },
 },
+indexMeta: &indexMeta{
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+1: genSegIndex(1, indexID, 20),
+2: genSegIndex(2, indexID, 20),
+3: genSegIndex(3, indexID, 20),
+4: genSegIndex(4, indexID, 20),
+5: genSegIndex(5, indexID, 20),
+6: genSegIndex(6, indexID, 20),
+},
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 2: {
@@ -1118,6 +1111,26 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
 },
 },
 },
+collections: map[int64]*collectionInfo{
+2: {
+ID: 2,
+Schema: &schemapb.CollectionSchema{
+Fields: []*schemapb.FieldSchema{
+{
+FieldID: vecFieldID,
+DataType: schemapb.DataType_FloatVector,
+TypeParams: []*commonpb.KeyValuePair{
+{
+Key: common.DimKey,
+Value: "128",
+},
+},
+},
+},
+},
+},
+},
+},
 newMockAllocator(),
 make(chan *compactionSignal, 1),
 &spyCompactionHandler{spyChan: make(chan *datapb.CompactionPlan, 1)},
@@ -1223,52 +1236,42 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
 1: {
 SegmentInfo: genSeg(1, 20),
 lastFlushTime: time.Now().Add(-100 * time.Minute),
-segmentIndexes: genSegIndex(1, indexID, 20),
 },
 2: {
 SegmentInfo: genSeg(2, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(2, indexID, 20),
 },
 3: {
 SegmentInfo: genSeg(3, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(3, indexID, 20),
 },
 4: {
 SegmentInfo: genSeg(4, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(4, indexID, 20),
 },
 5: {
 SegmentInfo: genSeg(5, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(5, indexID, 20),
 },
 6: {
 SegmentInfo: genSeg(6, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(6, indexID, 20),
 },
 7: {
 SegmentInfo: genSeg(7, 20),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(7, indexID, 20),
-},
-},
-},
-collections: map[int64]*collectionInfo{
-2: {
-ID: 2,
-Schema: &schemapb.CollectionSchema{
-Fields: []*schemapb.FieldSchema{
-{
-FieldID: vecFieldID,
-DataType: schemapb.DataType_FloatVector,
-},
 },
 },
 },
+indexMeta: &indexMeta{
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+1: genSegIndex(1, indexID, 20),
+2: genSegIndex(2, indexID, 20),
+3: genSegIndex(3, indexID, 20),
+4: genSegIndex(4, indexID, 20),
+5: genSegIndex(5, indexID, 20),
+6: genSegIndex(6, indexID, 20),
+7: genSegIndex(7, indexID, 20),
+},
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 2: {
@@ -1293,6 +1296,20 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
 },
 },
 },
+collections: map[int64]*collectionInfo{
+2: {
+ID: 2,
+Schema: &schemapb.CollectionSchema{
+Fields: []*schemapb.FieldSchema{
+{
+FieldID: vecFieldID,
+DataType: schemapb.DataType_FloatVector,
+},
+},
+},
+},
+},
+},
 newMockAllocator(),
 make(chan *compactionSignal, 1),
 &spyCompactionHandler{spyChan: make(chan *datapb.CompactionPlan, 1)},
@@ -1406,47 +1423,37 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
 1: {
 SegmentInfo: genSeg(1, 60),
 lastFlushTime: time.Now().Add(-100 * time.Minute),
-segmentIndexes: genSegIndex(1, indexID, 20),
 },
 2: {
 SegmentInfo: genSeg(2, 60),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(2, indexID, 20),
 },
 3: {
 SegmentInfo: genSeg(3, 60),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(3, indexID, 20),
 },
 4: {
 SegmentInfo: genSeg(4, 60),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(4, indexID, 20),
 },
 5: {
 SegmentInfo: genSeg(5, 26),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(5, indexID, 20),
 },
 6: {
 SegmentInfo: genSeg(6, 26),
 lastFlushTime: time.Now(),
-segmentIndexes: genSegIndex(6, indexID, 20),
-},
-},
-},
-collections: map[int64]*collectionInfo{
-2: {
-ID: 2,
-Schema: &schemapb.CollectionSchema{
-Fields: []*schemapb.FieldSchema{
-{
-FieldID: vecFieldID,
-DataType: schemapb.DataType_FloatVector,
-},
 },
 },
 },
+indexMeta: &indexMeta{
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+1: genSegIndex(1, indexID, 20),
+2: genSegIndex(2, indexID, 20),
+3: genSegIndex(3, indexID, 20),
+4: genSegIndex(4, indexID, 20),
+5: genSegIndex(5, indexID, 20),
+6: genSegIndex(6, indexID, 20),
+},
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 2: {
@@ -1471,6 +1478,20 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
 },
 },
 },
+collections: map[int64]*collectionInfo{
+2: {
+ID: 2,
+Schema: &schemapb.CollectionSchema{
+Fields: []*schemapb.FieldSchema{
+{
+FieldID: vecFieldID,
+DataType: schemapb.DataType_FloatVector,
+},
+},
+},
+},
+},
+},
 newMockAllocator(),
 make(chan *compactionSignal, 1),
 &spyCompactionHandler{spyChan: make(chan *datapb.CompactionPlan, 1)},
@@ -1545,6 +1566,31 @@ func Test_compactionTrigger_noplan_random_size(t *testing.T) {
 }
 vecFieldID := int64(201)
+indexMeta := newSegmentIndexMeta(nil)
+indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{
+2: {
+indexID: {
+TenantID: "",
+CollectionID: 2,
+FieldID: vecFieldID,
+IndexID: indexID,
+IndexName: "_default_idx",
+IsDeleted: false,
+CreateTime: 0,
+TypeParams: nil,
+IndexParams: []*commonpb.KeyValuePair{
+{
+Key: common.IndexTypeKey,
+Value: "HNSW",
+},
+},
+IsAutoIndex: false,
+UserIndexParams: nil,
+},
+},
+}
 for i := UniqueID(0); i < 50; i++ {
 info := &SegmentInfo{
 SegmentInfo: &datapb.SegmentInfo{
@@ -1565,8 +1611,9 @@ func Test_compactionTrigger_noplan_random_size(t *testing.T) {
 },
 },
 lastFlushTime: time.Now(),
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-indexID: {
+}
+indexMeta.updateSegmentIndex(&model.SegmentIndex{
 SegmentID: i,
 CollectionID: 2,
 PartitionID: 1,
@@ -1576,9 +1623,8 @@ func Test_compactionTrigger_noplan_random_size(t *testing.T) {
 NodeID: 0,
 IndexVersion: 1,
 IndexState: commonpb.IndexState_Finished,
-},
-},
-}
+})
 segmentInfos.segments[i] = info
 }
@@ -1613,28 +1659,7 @@ func Test_compactionTrigger_noplan_random_size(t *testing.T) {
 },
 },
 },
-indexes: map[UniqueID]map[UniqueID]*model.Index{
-2: {
-indexID: {
-TenantID: "",
-CollectionID: 2,
-FieldID: vecFieldID,
-IndexID: indexID,
-IndexName: "_default_idx",
-IsDeleted: false,
-CreateTime: 0,
-TypeParams: nil,
-IndexParams: []*commonpb.KeyValuePair{
-{
-Key: common.IndexTypeKey,
-Value: "HNSW",
-},
-},
-IsAutoIndex: false,
-UserIndexParams: nil,
-},
-},
-},
+indexMeta: indexMeta,
 },
 newMockAllocator(),
 make(chan *compactionSignal, 1),
@@ -1702,7 +1727,8 @@ func Test_compactionTrigger_noplan_random_size(t *testing.T) {
 // Test shouldDoSingleCompaction
 func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
-trigger := newCompactionTrigger(&meta{}, &compactionPlanHandler{}, newMockAllocator(), newMockHandler(), newIndexEngineVersionManager())
+indexMeta := newSegmentIndexMeta(nil)
+trigger := newCompactionTrigger(&meta{indexMeta: indexMeta}, &compactionPlanHandler{}, newMockAllocator(), newMockHandler(), newIndexEngineVersionManager())
 // Test too many deltalogs.
 var binlogs []*datapb.FieldBinlog
@@ -1856,13 +1882,8 @@ func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
 State: commonpb.SegmentState_Flushed,
 Binlogs: binlogs2,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-101: {
-CurrentIndexVersion: 1,
-IndexFileKeys: []string{"index1"},
-},
-},
 }
 info5 := &SegmentInfo{
 SegmentInfo: &datapb.SegmentInfo{
 ID: 1,
@@ -1875,13 +1896,8 @@ func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
 State: commonpb.SegmentState_Flushed,
 Binlogs: binlogs2,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-101: {
-CurrentIndexVersion: 2,
-IndexFileKeys: []string{"index1"},
-},
-},
 }
 info6 := &SegmentInfo{
 SegmentInfo: &datapb.SegmentInfo{
 ID: 1,
@@ -1894,10 +1910,20 @@ func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
 State: commonpb.SegmentState_Flushed,
 Binlogs: binlogs2,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-101: {
+}
+indexMeta.updateSegmentIndex(&model.SegmentIndex{
+SegmentID: 1,
+IndexID: 101,
 CurrentIndexVersion: 1,
-IndexFileKeys: nil,
+IndexFileKeys: []string{"index1"},
+})
+indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{
+2: {
+101: {
+CollectionID: 2,
+IndexID: 101,
 },
 },
 }
@@ -1906,9 +1932,23 @@ func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
 Params.Save(Params.DataCoordCfg.AutoUpgradeSegmentIndex.Key, "true")
 couldDo = trigger.ShouldDoSingleCompaction(info4, false, &compactTime{expireTime: 300})
 assert.True(t, couldDo)
+indexMeta.updateSegmentIndex(&model.SegmentIndex{
+SegmentID: 1,
+IndexID: 101,
+CurrentIndexVersion: 2,
+IndexFileKeys: []string{"index1"},
+})
 // expire time < Timestamp To, and index engine version is 2 which is equal CurrentIndexVersion in segmentIndex
 couldDo = trigger.ShouldDoSingleCompaction(info5, false, &compactTime{expireTime: 300})
 assert.False(t, couldDo)
+indexMeta.updateSegmentIndex(&model.SegmentIndex{
+SegmentID: 1,
+IndexID: 101,
+CurrentIndexVersion: 1,
+IndexFileKeys: nil,
+})
 // expire time < Timestamp To, and index engine version is 2 which is larger than CurrentIndexVersion in segmentIndex but indexFileKeys is nil
 couldDo = trigger.ShouldDoSingleCompaction(info6, false, &compactTime{expireTime: 300})
 assert.False(t, couldDo)
@@ -2147,47 +2187,37 @@ func (s *CompactionTriggerSuite) SetupTest() {
 1: {
 SegmentInfo: s.genSeg(1, 60),
 lastFlushTime: time.Now().Add(-100 * time.Minute),
-segmentIndexes: s.genSegIndex(1, indexID, 60),
 },
 2: {
 SegmentInfo: s.genSeg(2, 60),
 lastFlushTime: time.Now(),
-segmentIndexes: s.genSegIndex(2, indexID, 60),
 },
 3: {
 SegmentInfo: s.genSeg(3, 60),
 lastFlushTime: time.Now(),
-segmentIndexes: s.genSegIndex(3, indexID, 60),
 },
 4: {
 SegmentInfo: s.genSeg(4, 60),
 lastFlushTime: time.Now(),
-segmentIndexes: s.genSegIndex(4, indexID, 60),
 },
 5: {
 SegmentInfo: s.genSeg(5, 26),
 lastFlushTime: time.Now(),
-segmentIndexes: s.genSegIndex(5, indexID, 26),
 },
 6: {
 SegmentInfo: s.genSeg(6, 26),
 lastFlushTime: time.Now(),
-segmentIndexes: s.genSegIndex(6, indexID, 26),
-},
-},
-},
-collections: map[int64]*collectionInfo{
-s.collectionID: {
-ID: s.collectionID,
-Schema: &schemapb.CollectionSchema{
-Fields: []*schemapb.FieldSchema{
-{
-FieldID: s.vecFieldID,
-DataType: schemapb.DataType_FloatVector,
-},
-},
 },
 },
+indexMeta: &indexMeta{
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+1: s.genSegIndex(1, indexID, 60),
+2: s.genSegIndex(2, indexID, 60),
+3: s.genSegIndex(3, indexID, 60),
+4: s.genSegIndex(4, indexID, 60),
+5: s.genSegIndex(5, indexID, 26),
+6: s.genSegIndex(6, indexID, 26),
+},
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 s.collectionID: {
@@ -2211,6 +2241,20 @@ func (s *CompactionTriggerSuite) SetupTest() {
 },
 },
 },
+},
+collections: map[int64]*collectionInfo{
+s.collectionID: {
+ID: s.collectionID,
+Schema: &schemapb.CollectionSchema{
+Fields: []*schemapb.FieldSchema{
+{
+FieldID: s.vecFieldID,
+DataType: schemapb.DataType_FloatVector,
+},
+},
+},
+},
+},
 }
 s.allocator = NewNMockAllocator(s.T())
 s.compactionHandler = NewMockCompactionPlanContext(s.T())
@@ -2459,25 +2503,13 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 vecFieldID1 := int64(201)
 vecFieldID2 := int64(202)
 segmentInfos := make([]*SegmentInfo, 0)
 for i := UniqueID(0); i < 50; i++ {
 info := &SegmentInfo{
 SegmentInfo: &datapb.SegmentInfo{
 ID: i,
 CollectionID: collectionID,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-indexID: {
-SegmentID: i,
-CollectionID: collectionID,
-PartitionID: 1,
-NumRows: 100,
-IndexID: indexID,
-BuildID: i,
-NodeID: 0,
-IndexVersion: 1,
-IndexState: commonpb.IndexState_Finished,
-},
-},
 }
 segmentInfos = append(segmentInfos, info)
 }
@@ -2532,6 +2564,7 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 collections: map[int64]*collectionInfo{
 collectionID: info,
 },
+indexMeta: &indexMeta{
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 collectionID: {
 indexID: {
@@ -2573,6 +2606,7 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 },
 },
 },
+},
 newMockAllocator(),
 nil,
 &spyCompactionHandler{spyChan: make(chan *datapb.CompactionPlan, 2)},
@@ -2593,6 +2627,7 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 collections: map[int64]*collectionInfo{
 collectionID: info,
 },
+indexMeta: &indexMeta{
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 collectionID: {
 indexID: {
@@ -2634,6 +2669,7 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 },
 },
 },
+},
 newMockAllocator(),
 nil,
 &spyCompactionHandler{spyChan: make(chan *datapb.CompactionPlan, 2)},
@@ -2654,6 +2690,7 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 collections: map[int64]*collectionInfo{
 collectionID: info,
 },
+indexMeta: &indexMeta{
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 collectionID: {
 indexID: {
@@ -2695,6 +2732,7 @@ func Test_compactionTrigger_updateSegmentMaxSize(t *testing.T) {
 },
 },
 },
+},
 newMockAllocator(),
 nil,
 &spyCompactionHandler{spyChan: make(chan *datapb.CompactionPlan, 2)},
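
A note on the test changes above: instead of hanging segmentIndexes off every SegmentInfo, each test now builds one index-meta fixture and attaches it to meta through the new indexMeta field. A condensed, hypothetical consolidation of the recurring setup (names and values are taken from the tests above; newSegmentIndexMeta(nil) keeps the fixture purely in-memory, with no catalog):

indexMeta := newSegmentIndexMeta(nil) // nil catalog: in-memory only
// Register the collection's index definition once.
indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{
	2: {indexID: {CollectionID: 2, FieldID: vecFieldID, IndexID: indexID, IndexName: "_default_idx"}},
}
// Feed per-segment index state through updateSegmentIndex instead of
// embedding it in each SegmentInfo.
for i := UniqueID(0); i < 50; i++ {
	indexMeta.updateSegmentIndex(&model.SegmentIndex{
		SegmentID:    i,
		CollectionID: 2,
		IndexID:      indexID,
		BuildID:      i,
		IndexState:   commonpb.IndexState_Finished,
	})
}
// Hand the fixture to meta via the new field.
m := &meta{indexMeta: indexMeta, segments: segmentInfos}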

View File

@@ -460,9 +460,9 @@ func (gc *garbageCollector) removeLogs(logs []*datapb.Binlog) bool {
 func (gc *garbageCollector) recycleUnusedIndexes() {
 log.Info("start recycleUnusedIndexes")
-deletedIndexes := gc.meta.GetDeletedIndexes()
+deletedIndexes := gc.meta.indexMeta.GetDeletedIndexes()
 for _, index := range deletedIndexes {
-if err := gc.meta.RemoveIndex(index.CollectionID, index.IndexID); err != nil {
+if err := gc.meta.indexMeta.RemoveIndex(index.CollectionID, index.IndexID); err != nil {
 log.Warn("remove index on collection fail", zap.Int64("collectionID", index.CollectionID),
 zap.Int64("indexID", index.IndexID), zap.Error(err))
 continue
@@ -471,10 +471,10 @@ func (gc *garbageCollector) recycleUnusedIndexes() {
 }
 func (gc *garbageCollector) recycleUnusedSegIndexes() {
-segIndexes := gc.meta.GetAllSegIndexes()
+segIndexes := gc.meta.indexMeta.GetAllSegIndexes()
 for _, segIdx := range segIndexes {
-if gc.meta.GetSegment(segIdx.SegmentID) == nil || !gc.meta.IsIndexExist(segIdx.CollectionID, segIdx.IndexID) {
-if err := gc.meta.RemoveSegmentIndex(segIdx.CollectionID, segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexID, segIdx.BuildID); err != nil {
+if gc.meta.GetSegment(segIdx.SegmentID) == nil || !gc.meta.indexMeta.IsIndexExist(segIdx.CollectionID, segIdx.IndexID) {
+if err := gc.meta.indexMeta.RemoveSegmentIndex(segIdx.CollectionID, segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexID, segIdx.BuildID); err != nil {
 log.Warn("delete index meta from etcd failed, wait to retry", zap.Int64("buildID", segIdx.BuildID),
 zap.Int64("segmentID", segIdx.SegmentID), zap.Int64("nodeID", segIdx.NodeID), zap.Error(err))
 continue
@@ -507,7 +507,7 @@ func (gc *garbageCollector) recycleUnusedIndexFiles() {
 continue
 }
 log.Info("garbageCollector will recycle index files", zap.Int64("buildID", buildID))
-canRecycle, segIdx := gc.meta.CleanSegmentIndex(buildID)
+canRecycle, segIdx := gc.meta.indexMeta.CleanSegmentIndex(buildID)
 if !canRecycle {
 // Even if the index is marked as deleted, the index file will not be recycled, wait for the next gc,
 // and delete all index files about the buildID at one time.

View File

@@ -29,7 +29,7 @@ import (
 "time"
 "github.com/cockroachdb/errors"
-minio "github.com/minio/minio-go/v7"
+"github.com/minio/minio-go/v7"
 "github.com/minio/minio-go/v7/pkg/credentials"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
@@ -349,6 +349,8 @@ func createMetaForRecycleUnusedIndexes(catalog metastore.DataCoordCatalog) *meta
 segments: nil,
 channelCPs: nil,
 chunkManager: nil,
+indexMeta: &indexMeta{
+catalog: catalog,
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 collID: {
 indexID: {
@@ -395,6 +397,7 @@ func createMetaForRecycleUnusedIndexes(catalog metastore.DataCoordCatalog) *meta
 },
 },
 buildID2SegmentIndex: nil,
+},
 }
 }
@@ -447,7 +450,16 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
 NumOfRows: 1026,
 State: commonpb.SegmentState_Flushed,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
+},
+segID + 1: {
+SegmentInfo: nil,
+},
+},
+},
+indexMeta: &indexMeta{
+catalog: catalog,
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+segID: {
 indexID: {
 SegmentID: segID,
 CollectionID: collID,
@@ -466,10 +478,7 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
 WriteHandoff: false,
 },
 },
-},
 segID + 1: {
-SegmentInfo: nil,
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 1,
 CollectionID: collID,
@@ -489,10 +498,6 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
 },
 },
 },
-},
-},
-channelCPs: nil,
-chunkManager: nil,
 indexes: map[UniqueID]map[UniqueID]*model.Index{},
 buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
 buildID: {
@@ -530,6 +535,9 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
 WriteHandoff: false,
 },
 },
+},
+channelCPs: nil,
+chunkManager: nil,
 }
 }
@@ -587,7 +595,23 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
 NumOfRows: 1026,
 State: commonpb.SegmentState_Flushed,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
+},
+segID + 1: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 1,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+},
+},
+},
+},
+indexMeta: &indexMeta{
+catalog: catalog,
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+segID: {
 indexID: {
 SegmentID: segID,
 CollectionID: collID,
@@ -606,17 +630,7 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
 WriteHandoff: false,
 },
 },
-},
 segID + 1: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 1,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 1,
 CollectionID: collID,
@@ -636,8 +650,6 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
 },
 },
 },
-},
-},
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 collID: {
 indexID: {
@@ -691,6 +703,7 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
 WriteHandoff: false,
 },
 },
+},
 }
 }
@@ -851,25 +864,6 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 },
 },
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-indexID: {
-SegmentID: segID,
-CollectionID: collID,
-PartitionID: partID,
-NumRows: 5000,
-IndexID: indexID,
-BuildID: buildID,
-NodeID: 0,
-IndexVersion: 1,
-IndexState: commonpb.IndexState_Finished,
-FailReason: "",
-IsDeleted: false,
-CreateTime: 0,
-IndexFileKeys: []string{"file1", "file2"},
-IndexSize: 1024,
-WriteHandoff: false,
-},
-},
 },
 segID + 1: {
 SegmentInfo: &datapb.SegmentInfo{
@@ -885,25 +879,6 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 Timestamp: 900,
 },
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
-indexID: {
-SegmentID: segID + 1,
-CollectionID: collID,
-PartitionID: partID,
-NumRows: 5000,
-IndexID: indexID,
-BuildID: buildID + 1,
-NodeID: 0,
-IndexVersion: 1,
-IndexState: commonpb.IndexState_Finished,
-FailReason: "",
-IsDeleted: false,
-CreateTime: 0,
-IndexFileKeys: []string{"file3", "file4"},
-IndexSize: 1024,
-WriteHandoff: false,
-},
-},
 },
 segID + 2: {
 SegmentInfo: &datapb.SegmentInfo{
@@ -920,7 +895,6 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 },
 CompactionFrom: []int64{segID, segID + 1},
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{},
 },
 segID + 3: {
 SegmentInfo: &datapb.SegmentInfo{
@@ -937,7 +911,6 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 },
 CompactionFrom: nil,
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{},
 },
 segID + 4: {
 SegmentInfo: &datapb.SegmentInfo{
@@ -954,7 +927,6 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 },
 CompactionFrom: []int64{segID + 2, segID + 3},
 },
-segmentIndexes: map[UniqueID]*model.SegmentIndex{},
 },
 segID + 5: {
 SegmentInfo: &datapb.SegmentInfo{
@@ -1009,6 +981,50 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 },
 },
 },
+indexMeta: &indexMeta{
+catalog: catalog,
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
+segID: {
+indexID: {
+SegmentID: segID,
+CollectionID: collID,
+PartitionID: partID,
+NumRows: 5000,
+IndexID: indexID,
+BuildID: buildID,
+NodeID: 0,
+IndexVersion: 1,
+IndexState: commonpb.IndexState_Finished,
+FailReason: "",
+IsDeleted: false,
+CreateTime: 0,
+IndexFileKeys: []string{"file1", "file2"},
+IndexSize: 1024,
+WriteHandoff: false,
+},
+},
+segID + 1: {
+indexID: {
+SegmentID: segID + 1,
+CollectionID: collID,
+PartitionID: partID,
+NumRows: 5000,
+IndexID: indexID,
+BuildID: buildID + 1,
+NodeID: 0,
+IndexVersion: 1,
+IndexState: commonpb.IndexState_Finished,
+FailReason: "",
+IsDeleted: false,
+CreateTime: 0,
+IndexFileKeys: []string{"file3", "file4"},
+IndexSize: 1024,
+WriteHandoff: false,
+},
+},
+},
 buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
 buildID: {
 SegmentID: segID,
@@ -1062,6 +1078,8 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 },
 },
 },
+},
 collections: map[UniqueID]*collectionInfo{
 collID: {
 ID: collID,
@@ -1135,7 +1153,7 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 assert.Nil(t, segG)
 segH := gc.meta.GetSegment(segID + 7)
 assert.NotNil(t, segH)
-err := gc.meta.AddSegmentIndex(&model.SegmentIndex{
+err := gc.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 SegmentID: segID + 4,
 CollectionID: collID,
 PartitionID: partID,
@@ -1145,7 +1163,7 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
 })
 assert.NoError(t, err)
-err = gc.meta.FinishTask(&indexpb.IndexTaskInfo{
+err = gc.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 BuildID: buildID + 4,
 State: commonpb.IndexState_Finished,
 IndexFileKeys: []string{"file1", "file2", "file3", "file4"},

View File

@@ -128,7 +128,7 @@ func (ib *indexBuilder) Stop() {
 func (ib *indexBuilder) reloadFromKV() {
 segments := ib.meta.GetAllSegmentsUnsafe()
 for _, segment := range segments {
-for _, segIndex := range segment.segmentIndexes {
+for _, segIndex := range ib.meta.indexMeta.getSegmentIndexes(segment.ID) {
 if segIndex.IsDeleted {
 continue
 }
@@ -235,7 +235,7 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
 delete(ib.tasks, buildID)
 }
-meta, exist := ib.meta.GetIndexJob(buildID)
+meta, exist := ib.meta.indexMeta.GetIndexJob(buildID)
 if !exist {
 log.Ctx(ib.ctx).Debug("index task has not exist in meta table, remove task", zap.Int64("buildID", buildID))
 deleteFunc(buildID)
@@ -245,21 +245,21 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
 switch state {
 case indexTaskInit:
 segment := ib.meta.GetSegment(meta.SegmentID)
-if !isSegmentHealthy(segment) || !ib.meta.IsIndexExist(meta.CollectionID, meta.IndexID) {
+if !isSegmentHealthy(segment) || !ib.meta.indexMeta.IsIndexExist(meta.CollectionID, meta.IndexID) {
 log.Ctx(ib.ctx).Info("task is no need to build index, remove it", zap.Int64("buildID", buildID))
-if err := ib.meta.DeleteTask(buildID); err != nil {
+if err := ib.meta.indexMeta.DeleteTask(buildID); err != nil {
 log.Ctx(ib.ctx).Warn("IndexCoord delete index failed", zap.Int64("buildID", buildID), zap.Error(err))
 return false
 }
 deleteFunc(buildID)
 return true
 }
-indexParams := ib.meta.GetIndexParams(meta.CollectionID, meta.IndexID)
+indexParams := ib.meta.indexMeta.GetIndexParams(meta.CollectionID, meta.IndexID)
 indexType := getIndexType(indexParams)
 if isFlatIndex(indexType) || meta.NumRows < Params.DataCoordCfg.MinSegmentNumRowsToEnableIndex.GetAsInt64() {
 log.Ctx(ib.ctx).Info("segment does not need index really", zap.Int64("buildID", buildID),
 zap.Int64("segmentID", meta.SegmentID), zap.Int64("num rows", meta.NumRows))
-if err := ib.meta.FinishTask(&indexpb.IndexTaskInfo{
+if err := ib.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 BuildID: buildID,
 State: commonpb.IndexState_Finished,
 IndexFileKeys: nil,
@@ -280,7 +280,7 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
 return false
 }
 // update version and set nodeID
-if err := ib.meta.UpdateVersion(buildID, nodeID); err != nil {
+if err := ib.meta.indexMeta.UpdateVersion(buildID, nodeID); err != nil {
 log.Ctx(ib.ctx).Warn("index builder update index version failed", zap.Int64("build", buildID), zap.Error(err))
 return false
 }
@@ -305,7 +305,7 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
 }
 }
-typeParams := ib.meta.GetTypeParams(meta.CollectionID, meta.IndexID)
+typeParams := ib.meta.indexMeta.GetTypeParams(meta.CollectionID, meta.IndexID)
 var storageConfig *indexpb.StorageConfig
 if Params.CommonCfg.StorageType.GetValue() == "local" {
@@ -331,7 +331,7 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
 }
 }
-fieldID := ib.meta.GetFieldIDByIndexID(meta.CollectionID, meta.IndexID)
+fieldID := ib.meta.indexMeta.GetFieldIDByIndexID(meta.CollectionID, meta.IndexID)
 binlogIDs := getBinLogIds(segment, fieldID)
 if isDiskANNIndex(getIndexType(indexParams)) {
 var err error
@@ -428,7 +428,7 @@ func (ib *indexBuilder) process(buildID UniqueID) bool {
 log.Ctx(ib.ctx).Info("index task assigned successfully", zap.Int64("buildID", buildID),
 zap.Int64("segmentID", meta.SegmentID), zap.Int64("nodeID", nodeID))
 // update index meta state to InProgress
-if err := ib.meta.BuildIndex(buildID); err != nil {
+if err := ib.meta.indexMeta.BuildIndex(buildID); err != nil {
 // need to release lock then reassign, so set task state to retry
 log.Ctx(ib.ctx).Warn("index builder update index meta to InProgress failed", zap.Int64("buildID", buildID),
 zap.Int64("nodeID", nodeID), zap.Error(err))
@@ -481,7 +481,7 @@ func (ib *indexBuilder) getTaskState(buildID, nodeID UniqueID) indexTaskState {
 if info.GetState() == commonpb.IndexState_Failed || info.GetState() == commonpb.IndexState_Finished {
 log.Ctx(ib.ctx).Info("this task has been finished", zap.Int64("buildID", info.GetBuildID()),
 zap.String("index state", info.GetState().String()))
-if err := ib.meta.FinishTask(info); err != nil {
+if err := ib.meta.indexMeta.FinishTask(info); err != nil {
 log.Ctx(ib.ctx).Warn("IndexCoord update index state fail", zap.Int64("buildID", info.GetBuildID()),
 zap.String("index state", info.GetState().String()), zap.Error(err))
 return indexTaskInProgress
@@ -552,7 +552,7 @@ func (ib *indexBuilder) assignTask(builderClient types.IndexNodeClient, req *ind
 func (ib *indexBuilder) nodeDown(nodeID UniqueID) {
 defer ib.notify()
-metas := ib.meta.GetMetasByNodeID(nodeID)
+metas := ib.meta.indexMeta.GetMetasByNodeID(nodeID)
 ib.taskMutex.Lock()
 defer ib.taskMutex.Unlock()
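
The indexBuilder diffs are mechanical: every task-state lookup and mutation is re-routed through ib.meta.indexMeta, while segment lookups stay on meta itself. A trimmed sketch of the reworked indexTaskInit path in process, with error handling and the later task states omitted:

// Resolve the task from index meta, not from meta's global state.
meta, exist := ib.meta.indexMeta.GetIndexJob(buildID)
if !exist {
	return true // task no longer tracked in index meta
}
segment := ib.meta.GetSegment(meta.SegmentID) // segment info still lives on meta
if !isSegmentHealthy(segment) || !ib.meta.indexMeta.IsIndexExist(meta.CollectionID, meta.IndexID) {
	_ = ib.meta.indexMeta.DeleteTask(buildID) // sketch: the real code checks this error
	return true
}
// Index parameters are likewise served by indexMeta.
indexParams := ib.meta.indexMeta.GetIndexParams(meta.CollectionID, meta.IndexID)
typeParams := ib.meta.indexMeta.GetTypeParams(meta.CollectionID, meta.IndexID)
fieldID := ib.meta.indexMeta.GetFieldIDByIndexID(meta.CollectionID, meta.IndexID)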

View File

@@ -54,7 +54,7 @@ var (
 )
 func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
-return &meta{
+segIndeMeta := &indexMeta{
 catalog: catalog,
 indexes: map[UniqueID]map[UniqueID]*model.Index{
 collID: {
@@ -81,20 +81,8 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 },
 },
 },
-segments: &SegmentsInfo{
-segments: map[UniqueID]*SegmentInfo{
+segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
 segID: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1025,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID,
 CollectionID: collID,
@@ -112,19 +100,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 1: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 1,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 1,
 CollectionID: collID,
@@ -142,19 +118,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 2: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 2,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 2,
 CollectionID: collID,
@@ -172,19 +136,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 3: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 3,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 500,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 3,
 CollectionID: collID,
@@ -202,19 +154,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 4: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 4,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 4,
 CollectionID: collID,
@@ -232,19 +172,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 5: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 5,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 5,
 CollectionID: collID,
@@ -262,19 +190,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 6: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 6,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 6,
 CollectionID: collID,
@@ -292,19 +208,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 7: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 7,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 7,
 CollectionID: collID,
@@ -322,19 +226,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 8: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 8,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 1026,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 8,
 CollectionID: collID,
@@ -352,19 +244,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 9: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 9,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 500,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 9,
 CollectionID: collID,
@@ -382,19 +262,7 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 IndexSize: 0,
 },
 },
-},
 segID + 10: {
-SegmentInfo: &datapb.SegmentInfo{
-ID: segID + 10,
-CollectionID: collID,
-PartitionID: partID,
-InsertChannel: "",
-NumOfRows: 500,
-State: commonpb.SegmentState_Flushed,
-MaxRowNum: 65536,
-LastExpireTime: 10,
-},
-segmentIndexes: map[UniqueID]*model.SegmentIndex{
 indexID: {
 SegmentID: segID + 10,
 CollectionID: collID,
@@ -413,8 +281,6 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 },
 },
 },
-},
-},
 buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
 buildID: {
 SegmentID: segID,
@@ -594,6 +460,147 @@ func createMetaTable(catalog metastore.DataCoordCatalog) *meta {
 },
 },
 }
+return &meta{
+indexMeta: segIndeMeta,
+catalog: catalog,
+segments: &SegmentsInfo{
+segments: map[UniqueID]*SegmentInfo{
+segID: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1025,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 1: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 1,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 2: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 2,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 3: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 3,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 500,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 4: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 4,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 5: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 5,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 6: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 6,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 7: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 7,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 8: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 8,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 1026,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 9: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 9,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 500,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+segID + 10: {
+SegmentInfo: &datapb.SegmentInfo{
+ID: segID + 10,
+CollectionID: collID,
+PartitionID: partID,
+InsertChannel: "",
+NumOfRows: 500,
+State: commonpb.SegmentState_Flushed,
+MaxRowNum: 65536,
+LastExpireTime: 10,
+},
+},
+},
+},
+}
 }
 func TestIndexBuilder(t *testing.T) {
@@ -699,7 +706,7 @@ func TestIndexBuilder(t *testing.T) {
 IndexFileKeys: nil,
 IndexSize: 0,
 }
-err := ib.meta.AddSegmentIndex(segIdx)
+err := ib.meta.indexMeta.AddSegmentIndex(segIdx)
 assert.NoError(t, err)
 ib.enqueue(buildID + 10)
 })
@@ -785,7 +792,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 t.Run("no need to build index but update catalog failed", func(t *testing.T) {
 ib.meta.catalog = ec
-ib.meta.indexes[collID][indexID].IsDeleted = true
+ib.meta.indexMeta.indexes[collID][indexID].IsDeleted = true
 ib.tasks[buildID] = indexTaskInit
 ok := ib.process(buildID)
 assert.False(t, ok)
@@ -795,19 +802,22 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("init no need to build index", func(t *testing.T) {
+ib.meta.indexMeta.catalog = sc
 ib.meta.catalog = sc
-ib.meta.indexes[collID][indexID].IsDeleted = true
+ib.meta.indexMeta.indexes[collID][indexID].IsDeleted = true
 ib.tasks[buildID] = indexTaskInit
 ib.process(buildID)
 _, ok := ib.tasks[buildID]
 assert.False(t, ok)
-ib.meta.indexes[collID][indexID].IsDeleted = false
+ib.meta.indexMeta.indexes[collID][indexID].IsDeleted = false
 })
 t.Run("assign task error", func(t *testing.T) {
 paramtable.Get().Save(Params.CommonCfg.StorageType.Key, "local")
 ib.tasks[buildID] = indexTaskInit
+ib.meta.indexMeta.catalog = sc
 ib.meta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
@@ -831,7 +841,9 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("assign task fail", func(t *testing.T) {
 paramtable.Get().Save(Params.CommonCfg.StorageType.Key, "local")
+ib.meta.indexMeta.catalog = sc
 ib.meta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().CreateJob(mock.Anything, mock.Anything, mock.Anything).Return(&commonpb.Status{
 ErrorCode: commonpb.ErrorCode_UnexpectedError,
@@ -857,7 +869,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("drop job error", func(t *testing.T) {
-ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID
+ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
 ib.meta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().DropJobs(mock.Anything, mock.Anything, mock.Anything).Return(&commonpb.Status{
@@ -886,7 +898,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("drop job fail", func(t *testing.T) {
-ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID
+ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
 ib.meta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().DropJobs(mock.Anything, mock.Anything, mock.Anything).Return(&commonpb.Status{
@@ -916,8 +928,10 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("get state error", func(t *testing.T) {
-ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID
+ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
 ib.meta.catalog = sc
+ib.meta.indexMeta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("error"))
 ib.nodeManager = &IndexNodeManager{
@@ -936,8 +950,10 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("get state fail", func(t *testing.T) {
-ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID
+ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
 ib.meta.catalog = sc
+ib.meta.indexMeta.catalog = sc
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
 Status: &commonpb.Status{
@@ -961,8 +977,10 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("finish task fail", func(t *testing.T) {
-ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID
+ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
 ib.meta.catalog = ec
+ib.meta.indexMeta.catalog = ec
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
 Status: merr.Success(),
@@ -993,7 +1011,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("task still in progress", func(t *testing.T) {
-ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID
+ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
 ib.meta.catalog = ec
 ic := mocks.NewMockIndexNodeClient(t)
 ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
@@ -1025,7 +1043,7 @@ func TestIndexBuilder_Error(t *testing.T) {
 })
 t.Run("indexNode has no task", func(t *testing.T) {
ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
ib.meta.catalog = sc ib.meta.catalog = sc
ic := mocks.NewMockIndexNodeClient(t) ic := mocks.NewMockIndexNodeClient(t)
ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{ ic.EXPECT().QueryJobs(mock.Anything, mock.Anything, mock.Anything).Return(&indexpb.QueryJobsResponse{
@ -1048,7 +1066,7 @@ func TestIndexBuilder_Error(t *testing.T) {
}) })
t.Run("node not exist", func(t *testing.T) { t.Run("node not exist", func(t *testing.T) {
ib.meta.buildID2SegmentIndex[buildID].NodeID = nodeID ib.meta.indexMeta.buildID2SegmentIndex[buildID].NodeID = nodeID
ib.meta.catalog = sc ib.meta.catalog = sc
ib.nodeManager = &IndexNodeManager{ ib.nodeManager = &IndexNodeManager{
ctx: context.Background(), ctx: context.Background(),
@ -1179,7 +1197,7 @@ func TestIndexBuilderV2(t *testing.T) {
IndexFileKeys: nil, IndexFileKeys: nil,
IndexSize: 0, IndexSize: 0,
} }
err := ib.meta.AddSegmentIndex(segIdx) err := ib.meta.indexMeta.AddSegmentIndex(segIdx)
assert.NoError(t, err) assert.NoError(t, err)
ib.enqueue(buildID + 10) ib.enqueue(buildID + 10)
}) })
@ -1276,6 +1294,9 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
CreatedAt: 0, CreatedAt: 0,
}, },
}, },
indexMeta: &indexMeta{
catalog: catalog,
indexes: map[UniqueID]map[UniqueID]*model.Index{ indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
@ -1305,20 +1326,8 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
}, },
}, },
}, },
segments: &SegmentsInfo{ segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segments: map[UniqueID]*SegmentInfo{
segID: { segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: minNumberOfRowsToBuild,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: 10,
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: { indexID: {
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
@ -1337,8 +1346,6 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
}, },
}, },
}, },
},
},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{ buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
buildID: { buildID: {
SegmentID: segID, SegmentID: segID,
@ -1357,6 +1364,23 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
IndexSize: 0, IndexSize: 0,
}, },
}, },
},
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: minNumberOfRowsToBuild,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: 10,
},
},
},
},
} }
nodeManager := &IndexNodeManager{ nodeManager := &IndexNodeManager{
@ -1383,9 +1407,9 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
} }
resetMetaFunc := func() { resetMetaFunc := func() {
mt.buildID2SegmentIndex[buildID].IndexState = commonpb.IndexState_Unissued mt.indexMeta.buildID2SegmentIndex[buildID].IndexState = commonpb.IndexState_Unissued
mt.segments.segments[segID].segmentIndexes[indexID].IndexState = commonpb.IndexState_Unissued mt.indexMeta.segmentIndexes[segID][indexID].IndexState = commonpb.IndexState_Unissued
mt.indexes[collID][indexID].IndexParams[1].Value = indexparamcheck.IndexHNSW mt.indexMeta.indexes[collID][indexID].IndexParams[1].Value = indexparamcheck.IndexHNSW
mt.collections[collID].Schema.Fields[1].IsPartitionKey = true mt.collections[collID].Schema.Fields[1].IsPartitionKey = true
} }
@ -1431,7 +1455,7 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
assert.NotZero(t, len(in.OptionalScalarFields), "optional scalar field should be set") assert.NotZero(t, len(in.OptionalScalarFields), "optional scalar field should be set")
return merr.Success(), nil return merr.Success(), nil
}).Once() }).Once()
err := ib.meta.AddSegmentIndex(segIdx) err := ib.meta.indexMeta.AddSegmentIndex(segIdx)
assert.NoError(t, err) assert.NoError(t, err)
ib.enqueue(buildID) ib.enqueue(buildID)
waitTaskDoneFunc(ib) waitTaskDoneFunc(ib)
@ -1446,7 +1470,7 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
assert.Zero(t, len(in.OptionalScalarFields), "optional scalar field should not be set") assert.Zero(t, len(in.OptionalScalarFields), "optional scalar field should not be set")
return merr.Success(), nil return merr.Success(), nil
}).Once() }).Once()
err := ib.meta.AddSegmentIndex(segIdx) err := ib.meta.indexMeta.AddSegmentIndex(segIdx)
assert.NoError(t, err) assert.NoError(t, err)
ib.enqueue(buildID) ib.enqueue(buildID)
waitTaskDoneFunc(ib) waitTaskDoneFunc(ib)
@ -1455,13 +1479,13 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
t.Run("enqueue returns empty optional field when index is not HNSW", func(t *testing.T) { t.Run("enqueue returns empty optional field when index is not HNSW", func(t *testing.T) {
paramtable.Get().CommonCfg.EnableNodeFilteringOnPartitionKey.SwapTempValue("true") paramtable.Get().CommonCfg.EnableNodeFilteringOnPartitionKey.SwapTempValue("true")
mt.indexes[collID][indexID].IndexParams[1].Value = indexparamcheck.IndexDISKANN mt.indexMeta.indexes[collID][indexID].IndexParams[1].Value = indexparamcheck.IndexDISKANN
ic.EXPECT().CreateJob(mock.Anything, mock.Anything, mock.Anything, mock.Anything).RunAndReturn( ic.EXPECT().CreateJob(mock.Anything, mock.Anything, mock.Anything, mock.Anything).RunAndReturn(
func(ctx context.Context, in *indexpb.CreateJobRequest, opts ...grpc.CallOption) (*commonpb.Status, error) { func(ctx context.Context, in *indexpb.CreateJobRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
assert.Zero(t, len(in.OptionalScalarFields), "optional scalar field should not be set") assert.Zero(t, len(in.OptionalScalarFields), "optional scalar field should not be set")
return merr.Success(), nil return merr.Success(), nil
}).Once() }).Once()
err := ib.meta.AddSegmentIndex(segIdx) err := ib.meta.indexMeta.AddSegmentIndex(segIdx)
assert.NoError(t, err) assert.NoError(t, err)
ib.enqueue(buildID) ib.enqueue(buildID)
waitTaskDoneFunc(ib) waitTaskDoneFunc(ib)
@ -1476,7 +1500,7 @@ func TestVecIndexWithOptionalScalarField(t *testing.T) {
assert.Zero(t, len(in.OptionalScalarFields), "optional scalar field should not be set") assert.Zero(t, len(in.OptionalScalarFields), "optional scalar field should not be set")
return merr.Success(), nil return merr.Success(), nil
}).Once() }).Once()
err := ib.meta.AddSegmentIndex(segIdx) err := ib.meta.indexMeta.AddSegmentIndex(segIdx)
assert.NoError(t, err) assert.NoError(t, err)
ib.enqueue(buildID) ib.enqueue(buildID)
waitTaskDoneFunc(ib) waitTaskDoneFunc(ib)

View File

@ -21,33 +21,98 @@ import (
"context" "context"
"fmt" "fmt"
"strconv" "strconv"
"sync"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
"go.uber.org/zap" "go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/pkg/common" "github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log" "github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics" "github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/timerecord"
) )
func (m *meta) updateCollectionIndex(index *model.Index) { type indexMeta struct {
sync.RWMutex
ctx context.Context
catalog metastore.DataCoordCatalog
// indexes records which indexes exist on each collection
// collID -> indexID -> index
indexes map[UniqueID]map[UniqueID]*model.Index
// buildID2SegmentIndex records the segment index meta of each build task
// buildID -> segmentIndex
buildID2SegmentIndex map[UniqueID]*model.SegmentIndex
// segmentID -> indexID -> segmentIndex
segmentIndexes map[UniqueID]map[UniqueID]*model.SegmentIndex
}
// newIndexMeta creates an indexMeta backed by the provided catalog
func newIndexMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*indexMeta, error) {
mt := &indexMeta{
ctx: ctx,
catalog: catalog,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
segmentIndexes: make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
}
err := mt.reloadFromKV()
if err != nil {
return nil, err
}
return mt, nil
}
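A caller would wire this up during DataCoord meta bootstrap; a minimal sketch, assuming ctx and catalog come from the caller's scope:

	im, err := newIndexMeta(ctx, catalog)
	if err != nil {
		return err // reloading from the catalog failed; surface it to the caller
	}
	_ = im // index reads and writes now synchronize on indexMeta's own RWMutex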
// reloadFromKV loads meta from KV storage
func (m *indexMeta) reloadFromKV() error {
record := timerecord.NewTimeRecorder("indexMeta-reloadFromKV")
// load field indexes
fieldIndexes, err := m.catalog.ListIndexes(m.ctx)
if err != nil {
log.Error("indexMeta reloadFromKV load field indexes fail", zap.Error(err))
return err
}
for _, fieldIndex := range fieldIndexes {
m.updateCollectionIndex(fieldIndex)
}
segmentIndexes, err := m.catalog.ListSegmentIndexes(m.ctx)
if err != nil {
log.Error("indexMeta reloadFromKV load segment indexes fail", zap.Error(err))
return err
}
for _, segIdx := range segmentIndexes {
m.updateSegmentIndex(segIdx)
metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(segIdx.IndexFileKeys)))
}
log.Info("indexMeta reloadFromKV done", zap.Duration("duration", record.ElapseSpan()))
return nil
}
func (m *indexMeta) updateCollectionIndex(index *model.Index) {
if _, ok := m.indexes[index.CollectionID]; !ok { if _, ok := m.indexes[index.CollectionID]; !ok {
m.indexes[index.CollectionID] = make(map[UniqueID]*model.Index) m.indexes[index.CollectionID] = make(map[UniqueID]*model.Index)
} }
m.indexes[index.CollectionID][index.IndexID] = index m.indexes[index.CollectionID][index.IndexID] = index
} }
func (m *meta) updateSegmentIndex(segIdx *model.SegmentIndex) { func (m *indexMeta) updateSegmentIndex(segIdx *model.SegmentIndex) {
m.segments.SetSegmentIndex(segIdx.SegmentID, segIdx) indexes, ok := m.segmentIndexes[segIdx.SegmentID]
if ok {
indexes[segIdx.IndexID] = segIdx
} else {
m.segmentIndexes[segIdx.SegmentID] = make(map[UniqueID]*model.SegmentIndex)
m.segmentIndexes[segIdx.SegmentID][segIdx.IndexID] = segIdx
}
m.buildID2SegmentIndex[segIdx.BuildID] = segIdx m.buildID2SegmentIndex[segIdx.BuildID] = segIdx
} }
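As a design note, the if/else over the inner map is equivalent to the common nil-map idiom, since indexing a missing key yields a nil map; a behavior-preserving sketch:

	if m.segmentIndexes[segIdx.SegmentID] == nil {
		m.segmentIndexes[segIdx.SegmentID] = make(map[UniqueID]*model.SegmentIndex)
	}
	m.segmentIndexes[segIdx.SegmentID][segIdx.IndexID] = segIdx
	m.buildID2SegmentIndex[segIdx.BuildID] = segIdx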
func (m *meta) alterSegmentIndexes(segIdxes []*model.SegmentIndex) error { func (m *indexMeta) alterSegmentIndexes(segIdxes []*model.SegmentIndex) error {
err := m.catalog.AlterSegmentIndexes(m.ctx, segIdxes) err := m.catalog.AlterSegmentIndexes(m.ctx, segIdxes)
if err != nil { if err != nil {
log.Error("failed to alter segments index in meta store", zap.Int("segment indexes num", len(segIdxes)), log.Error("failed to alter segments index in meta store", zap.Int("segment indexes num", len(segIdxes)),
@ -60,15 +125,15 @@ func (m *meta) alterSegmentIndexes(segIdxes []*model.SegmentIndex) error {
return nil return nil
} }
func (m *meta) updateIndexMeta(index *model.Index, updateFunc func(clonedIndex *model.Index) error) error { func (m *indexMeta) updateIndexMeta(index *model.Index, updateFunc func(clonedIndex *model.Index) error) error {
return updateFunc(model.CloneIndex(index)) return updateFunc(model.CloneIndex(index))
} }
func (m *meta) updateSegIndexMeta(segIdx *model.SegmentIndex, updateFunc func(clonedSegIdx *model.SegmentIndex) error) error { func (m *indexMeta) updateSegIndexMeta(segIdx *model.SegmentIndex, updateFunc func(clonedSegIdx *model.SegmentIndex) error) error {
return updateFunc(model.CloneSegmentIndex(segIdx)) return updateFunc(model.CloneSegmentIndex(segIdx))
} }
func (m *meta) updateIndexTasksMetrics() { func (m *indexMeta) updateIndexTasksMetrics() {
taskMetrics := make(map[UniqueID]map[commonpb.IndexState]int) taskMetrics := make(map[UniqueID]map[commonpb.IndexState]int)
for _, segIdx := range m.buildID2SegmentIndex { for _, segIdx := range m.buildID2SegmentIndex {
if segIdx.IsDeleted { if segIdx.IsDeleted {
@ -138,7 +203,7 @@ func checkParams(fieldIndex *model.Index, req *indexpb.CreateIndexRequest) bool
return !notEq return !notEq
} }
func (m *meta) CanCreateIndex(req *indexpb.CreateIndexRequest) (UniqueID, error) { func (m *indexMeta) CanCreateIndex(req *indexpb.CreateIndexRequest) (UniqueID, error) {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -171,7 +236,7 @@ func (m *meta) CanCreateIndex(req *indexpb.CreateIndexRequest) (UniqueID, error)
} }
// HasSameReq determines whether an identical indexing request already exists. // HasSameReq determines whether an identical indexing request already exists.
func (m *meta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) { func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -194,7 +259,7 @@ func (m *meta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) {
return false, 0 return false, 0
} }
func (m *meta) CreateIndex(index *model.Index) error { func (m *indexMeta) CreateIndex(index *model.Index) error {
log.Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID), log.Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID),
zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName)) zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName))
m.Lock() m.Lock()
@ -213,7 +278,7 @@ func (m *meta) CreateIndex(index *model.Index) error {
return nil return nil
} }
func (m *meta) AlterIndex(ctx context.Context, indexes ...*model.Index) error { func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -230,7 +295,7 @@ func (m *meta) AlterIndex(ctx context.Context, indexes ...*model.Index) error {
} }
// AddSegmentIndex adds the index meta corresponding to the indexBuildID to the meta table. // AddSegmentIndex adds the index meta corresponding to the indexBuildID to the meta table.
func (m *meta) AddSegmentIndex(segIndex *model.SegmentIndex) error { func (m *indexMeta) AddSegmentIndex(segIndex *model.SegmentIndex) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -254,7 +319,7 @@ func (m *meta) AddSegmentIndex(segIndex *model.SegmentIndex) error {
return nil return nil
} }
func (m *meta) GetIndexIDByName(collID int64, indexName string) map[int64]uint64 { func (m *indexMeta) GetIndexIDByName(collID int64, indexName string) map[int64]uint64 {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
indexID2CreateTs := make(map[int64]uint64) indexID2CreateTs := make(map[int64]uint64)
@ -272,7 +337,7 @@ func (m *meta) GetIndexIDByName(collID int64, indexName string) map[int64]uint64
return indexID2CreateTs return indexID2CreateTs
} }
func (m *meta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID) *indexpb.SegmentIndexState { func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID) *indexpb.SegmentIndexState {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -286,14 +351,16 @@ func (m *meta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID
state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID) state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID)
return state return state
} }
segment := m.segments.GetSegment(segmentID)
if segment == nil { indexes, ok := m.segmentIndexes[segmentID]
state.FailReason = fmt.Sprintf("segment is not exist with ID: %d", segmentID) if !ok {
state.State = commonpb.IndexState_Unissued
state.FailReason = fmt.Sprintf("segment index not exist with ID: %d", segmentID)
return state return state
} }
if index, ok := fieldIndexes[indexID]; ok && !index.IsDeleted { if index, ok := fieldIndexes[indexID]; ok && !index.IsDeleted {
if segIdx, ok := segment.segmentIndexes[indexID]; ok { if segIdx, ok := indexes[indexID]; ok {
state.IndexName = index.IndexName state.IndexName = index.IndexName
state.State = segIdx.IndexState state.State = segIdx.IndexState
state.FailReason = segIdx.FailReason state.FailReason = segIdx.FailReason
@ -307,7 +374,7 @@ func (m *meta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID
return state return state
} }
func (m *meta) GetSegmentIndexStateOnField(collID, segmentID, fieldID UniqueID) *indexpb.SegmentIndexState { func (m *indexMeta) GetSegmentIndexStateOnField(collID, segmentID, fieldID UniqueID) *indexpb.SegmentIndexState {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -321,14 +388,17 @@ func (m *meta) GetSegmentIndexStateOnField(collID, segmentID, fieldID UniqueID)
state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID) state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID)
return state return state
} }
segment := m.segments.GetSegment(segmentID)
if segment == nil { indexes, ok := m.segmentIndexes[segmentID]
state.FailReason = fmt.Sprintf("segment is not exist with ID: %d", segmentID) if !ok {
state.FailReason = fmt.Sprintf("segment index not exist with ID: %d", segmentID)
state.State = commonpb.IndexState_Unissued
return state return state
} }
for indexID, index := range fieldIndexes { for indexID, index := range fieldIndexes {
if index.FieldID == fieldID && !index.IsDeleted { if index.FieldID == fieldID && !index.IsDeleted {
if segIdx, ok := segment.segmentIndexes[indexID]; ok { if segIdx, ok := indexes[indexID]; ok {
state.IndexName = index.IndexName state.IndexName = index.IndexName
state.State = segIdx.IndexState state.State = segIdx.IndexState
state.FailReason = segIdx.FailReason state.FailReason = segIdx.FailReason
@ -343,7 +413,7 @@ func (m *meta) GetSegmentIndexStateOnField(collID, segmentID, fieldID UniqueID)
} }
// GetIndexesForCollection gets all index info for the specified collection. // GetIndexesForCollection gets all index info for the specified collection.
func (m *meta) GetIndexesForCollection(collID UniqueID, indexName string) []*model.Index { func (m *indexMeta) GetIndexesForCollection(collID UniqueID, indexName string) []*model.Index {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -359,7 +429,7 @@ func (m *meta) GetIndexesForCollection(collID UniqueID, indexName string) []*mod
return indexInfos return indexInfos
} }
func (m *meta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) []*model.Index { func (m *indexMeta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) []*model.Index {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -376,7 +446,7 @@ func (m *meta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) []*mo
} }
// MarkIndexAsDeleted will mark the corresponding index as deleted, and recycleUnusedIndexFiles will recycle these tasks. // MarkIndexAsDeleted will mark the corresponding index as deleted, and recycleUnusedIndexFiles will recycle these tasks.
func (m *meta) MarkIndexAsDeleted(collID UniqueID, indexIDs []UniqueID) error { func (m *indexMeta) MarkIndexAsDeleted(collID UniqueID, indexIDs []UniqueID) error {
log.Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID), log.Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID),
zap.Int64s("indexIDs", indexIDs)) zap.Int64s("indexIDs", indexIDs))
@ -413,29 +483,73 @@ func (m *meta) MarkIndexAsDeleted(collID UniqueID, indexIDs []UniqueID) error {
return nil return nil
} }
func (m *meta) GetSegmentIndexes(segID UniqueID) []*model.SegmentIndex { func (m *indexMeta) IsUnIndexedSegment(collectionID UniqueID, segID UniqueID) bool {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
segIndexInfos := make([]*model.SegmentIndex, 0) fieldIndexes, ok := m.indexes[collectionID]
segment := m.segments.GetSegment(segID)
if segment == nil {
return segIndexInfos
}
fieldIndex, ok := m.indexes[segment.CollectionID]
if !ok { if !ok {
return segIndexInfos return false
} }
for _, segIdx := range segment.segmentIndexes { // since fieldIndexes is not empty here, a segment with no index entries counts as unindexed
if index, ok := fieldIndex[segIdx.IndexID]; ok && !index.IsDeleted { segIndexInfos, ok := m.segmentIndexes[segID]
segIndexInfos = append(segIndexInfos, model.CloneSegmentIndex(segIdx)) if !ok || len(segIndexInfos) == 0 {
} return true
}
return segIndexInfos
} }
func (m *meta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID { for _, index := range fieldIndexes {
if _, ok := segIndexInfos[index.IndexID]; !index.IsDeleted {
if !ok {
// the segment counts as unindexed if a live (non-deleted) field index has no matching segment index
return true
}
}
}
return false
}
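To make the outcomes concrete, a hypothetical in-package check (fixture IDs invented for illustration):

	m := &indexMeta{
		indexes:        map[UniqueID]map[UniqueID]*model.Index{1: {10: {IndexID: 10}}},
		segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
	}
	m.IsUnIndexedSegment(2, 100) // false: collection 2 declares no indexes at all
	m.IsUnIndexedSegment(1, 100) // true: collection 1 has an index but segment 100 has no entry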
func (m *indexMeta) getSegmentIndexes(segID UniqueID) map[UniqueID]*model.SegmentIndex {
m.RLock()
defer m.RUnlock()
ret := make(map[UniqueID]*model.SegmentIndex, 0)
segIndexInfos, ok := m.segmentIndexes[segID]
if !ok || len(segIndexInfos) == 0 {
return ret
}
for _, segIdx := range segIndexInfos {
ret[segIdx.IndexID] = model.CloneSegmentIndex(segIdx)
}
return ret
}
func (m *indexMeta) GetSegmentIndexes(collectionID UniqueID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
m.RLock()
defer m.RUnlock()
ret := make(map[UniqueID]*model.SegmentIndex, 0)
segIndexInfos, ok := m.segmentIndexes[segID]
if !ok || len(segIndexInfos) == 0 {
return ret
}
fieldIndexes, ok := m.indexes[collectionID]
if !ok {
return ret
}
for _, segIdx := range segIndexInfos {
if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
ret[segIdx.IndexID] = model.CloneSegmentIndex(segIdx)
}
}
return ret
}
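Both getters return clones, so callers may keep reading the result after the lock is released; the exported variant additionally requires the collection's field indexes and skips entries whose index is marked deleted. A usage sketch, assuming collID and segID from the surrounding scope:

	for indexID, segIdx := range m.GetSegmentIndexes(collID, segID) {
		_ = indexID
		_ = segIdx.IndexState // segIdx is a clone; mutating it cannot corrupt indexMeta
	}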
func (m *indexMeta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -447,7 +561,7 @@ func (m *meta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
return 0 return 0
} }
func (m *meta) GetIndexNameByID(collID, indexID UniqueID) string { func (m *indexMeta) GetIndexNameByID(collID, indexID UniqueID) string {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
if fieldIndexes, ok := m.indexes[collID]; ok { if fieldIndexes, ok := m.indexes[collID]; ok {
@ -458,7 +572,7 @@ func (m *meta) GetIndexNameByID(collID, indexID UniqueID) string {
return "" return ""
} }
func (m *meta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValuePair { func (m *indexMeta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -479,7 +593,7 @@ func (m *meta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValuePair
return indexParams return indexParams
} }
func (m *meta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValuePair { func (m *indexMeta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -500,7 +614,7 @@ func (m *meta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValuePair
return typeParams return typeParams
} }
func (m *meta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) { func (m *indexMeta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -512,7 +626,7 @@ func (m *meta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
return nil, false return nil, false
} }
func (m *meta) IsIndexExist(collID, indexID UniqueID) bool { func (m *indexMeta) IsIndexExist(collID, indexID UniqueID) bool {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -528,7 +642,7 @@ func (m *meta) IsIndexExist(collID, indexID UniqueID) bool {
} }
// UpdateVersion updates the version and nodeID of the index meta; each time the task is built, the version is bumped once. // UpdateVersion updates the version and nodeID of the index meta; each time the task is built, the version is bumped once.
func (m *meta) UpdateVersion(buildID UniqueID, nodeID UniqueID) error { func (m *indexMeta) UpdateVersion(buildID UniqueID, nodeID UniqueID) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -547,7 +661,7 @@ func (m *meta) UpdateVersion(buildID UniqueID, nodeID UniqueID) error {
return m.updateSegIndexMeta(segIdx, updateFunc) return m.updateSegIndexMeta(segIdx, updateFunc)
} }
func (m *meta) FinishTask(taskInfo *indexpb.IndexTaskInfo) error { func (m *indexMeta) FinishTask(taskInfo *indexpb.IndexTaskInfo) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -578,7 +692,7 @@ func (m *meta) FinishTask(taskInfo *indexpb.IndexTaskInfo) error {
return nil return nil
} }
func (m *meta) DeleteTask(buildID int64) error { func (m *indexMeta) DeleteTask(buildID int64) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -603,7 +717,7 @@ func (m *meta) DeleteTask(buildID int64) error {
} }
// BuildIndex sets the index state to InProgress, meaning an IndexNode is building the index. // BuildIndex sets the index state to InProgress, meaning an IndexNode is building the index.
func (m *meta) BuildIndex(buildID UniqueID) error { func (m *indexMeta) BuildIndex(buildID UniqueID) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -632,7 +746,7 @@ func (m *meta) BuildIndex(buildID UniqueID) error {
return nil return nil
} }
func (m *meta) GetAllSegIndexes() map[int64]*model.SegmentIndex { func (m *indexMeta) GetAllSegIndexes() map[int64]*model.SegmentIndex {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -643,7 +757,7 @@ func (m *meta) GetAllSegIndexes() map[int64]*model.SegmentIndex {
return segIndexes return segIndexes
} }
func (m *meta) RemoveSegmentIndex(collID, partID, segID, indexID, buildID UniqueID) error { func (m *indexMeta) RemoveSegmentIndex(collID, partID, segID, indexID, buildID UniqueID) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -652,13 +766,20 @@ func (m *meta) RemoveSegmentIndex(collID, partID, segID, indexID, buildID Unique
return err return err
} }
m.segments.DropSegmentIndex(segID, indexID) if _, ok := m.segmentIndexes[segID]; ok {
delete(m.segmentIndexes[segID], indexID)
}
if len(m.segmentIndexes[segID]) == 0 {
delete(m.segmentIndexes, segID)
}
delete(m.buildID2SegmentIndex, buildID) delete(m.buildID2SegmentIndex, buildID)
m.updateIndexTasksMetrics() m.updateIndexTasksMetrics()
return nil return nil
} }
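Incidentally, the existence check before the inner delete is not strictly needed: in Go, delete on a nil map is a no-op, and indexing a missing outer key yields a nil inner map. A shorter equivalent:

	delete(m.segmentIndexes[segID], indexID) // safe even when segID is absent
	if len(m.segmentIndexes[segID]) == 0 {
		delete(m.segmentIndexes, segID) // prune the now-empty outer entry
	}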
func (m *meta) GetDeletedIndexes() []*model.Index { func (m *indexMeta) GetDeletedIndexes() []*model.Index {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -673,7 +794,7 @@ func (m *meta) GetDeletedIndexes() []*model.Index {
return deletedIndexes return deletedIndexes
} }
func (m *meta) RemoveIndex(collID, indexID UniqueID) error { func (m *indexMeta) RemoveIndex(collID, indexID UniqueID) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
log.Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID)) log.Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID))
@ -696,7 +817,7 @@ func (m *meta) RemoveIndex(collID, indexID UniqueID) error {
return nil return nil
} }
func (m *meta) CleanSegmentIndex(buildID UniqueID) (bool, *model.SegmentIndex) { func (m *indexMeta) CleanSegmentIndex(buildID UniqueID) (bool, *model.SegmentIndex) {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()
@ -709,28 +830,7 @@ func (m *meta) CleanSegmentIndex(buildID UniqueID) (bool, *model.SegmentIndex) {
return true, nil return true, nil
} }
func (m *meta) GetHasUnindexTaskSegments() []*SegmentInfo { func (m *indexMeta) GetMetasByNodeID(nodeID UniqueID) []*model.SegmentIndex {
m.RLock()
defer m.RUnlock()
segments := m.segments.GetSegments()
unindexedSegments := make(map[int64]*SegmentInfo)
for _, segment := range segments {
if !isFlush(segment) {
continue
}
if fieldIndexes, ok := m.indexes[segment.CollectionID]; ok {
for _, index := range fieldIndexes {
if _, ok := segment.segmentIndexes[index.IndexID]; !index.IsDeleted && !ok {
unindexedSegments[segment.GetID()] = segment
}
}
}
}
return lo.MapToSlice(unindexedSegments, func(_ int64, segment *SegmentInfo) *SegmentInfo { return segment })
}
func (m *meta) GetMetasByNodeID(nodeID UniqueID) []*model.SegmentIndex {
m.RLock() m.RLock()
defer m.RUnlock() defer m.RUnlock()

View File

@ -21,7 +21,6 @@ import (
"context" "context"
"sync" "sync"
"testing" "testing"
"time"
"github.com/cockroachdb/errors" "github.com/cockroachdb/errors"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -29,14 +28,55 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
mockkv "github.com/milvus-io/milvus/internal/kv/mocks" mockkv "github.com/milvus-io/milvus/internal/kv/mocks"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/metastore/kv/datacoord" "github.com/milvus-io/milvus/internal/metastore/kv/datacoord"
catalogmocks "github.com/milvus-io/milvus/internal/metastore/mocks" catalogmocks "github.com/milvus-io/milvus/internal/metastore/mocks"
"github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/indexpb" "github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/pkg/common" "github.com/milvus-io/milvus/pkg/common"
) )
func TestReloadFromKV(t *testing.T) {
t.Run("ListIndexes_fail", func(t *testing.T) {
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, errors.New("mock"))
_, err := newIndexMeta(context.TODO(), catalog)
assert.Error(t, err)
})
t.Run("ListSegmentIndexes_fails", func(t *testing.T) {
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil)
catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, errors.New("mock"))
_, err := newIndexMeta(context.TODO(), catalog)
assert.Error(t, err)
})
t.Run("ok", func(t *testing.T) {
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{
{
CollectionID: 1,
IndexID: 1,
IndexName: "dix",
CreateTime: 1,
},
}, nil)
catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{
{
SegmentID: 1,
IndexID: 1,
},
}, nil)
meta, err := newIndexMeta(context.TODO(), catalog)
assert.NoError(t, err)
assert.NotNil(t, meta)
})
}
func TestMeta_CanCreateIndex(t *testing.T) { func TestMeta_CanCreateIndex(t *testing.T) {
var ( var (
collID = UniqueID(1) collID = UniqueID(1)
@ -64,17 +104,7 @@ func TestMeta_CanCreateIndex(t *testing.T) {
mock.Anything, mock.Anything,
).Return(nil) ).Return(nil)
m := &meta{ m := newSegmentIndexMeta(catalog)
RWMutex: sync.RWMutex{},
ctx: context.Background(),
catalog: catalog,
collections: nil,
segments: nil,
channelCPs: nil,
chunkManager: nil,
indexes: map[UniqueID]map[UniqueID]*model.Index{},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{},
}
req := &indexpb.CreateIndexRequest{ req := &indexpb.CreateIndexRequest{
CollectionID: collID, CollectionID: collID,
@ -180,17 +210,8 @@ func TestMeta_HasSameReq(t *testing.T) {
}, },
} }
) )
m := &meta{
RWMutex: sync.RWMutex{}, m := newSegmentIndexMeta(catalogmocks.NewDataCoordCatalog(t))
ctx: context.Background(),
catalog: catalogmocks.NewDataCoordCatalog(t),
collections: nil,
segments: nil,
channelCPs: nil,
chunkManager: nil,
indexes: map[UniqueID]map[UniqueID]*model.Index{},
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{},
}
req := &indexpb.CreateIndexRequest{ req := &indexpb.CreateIndexRequest{
CollectionID: collID, CollectionID: collID,
@ -241,6 +262,17 @@ func TestMeta_HasSameReq(t *testing.T) {
}) })
} }
func newSegmentIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
return &indexMeta{
RWMutex: sync.RWMutex{},
ctx: context.Background(),
catalog: catalog,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
segmentIndexes: make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
}
}
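The tests below lean on this helper and then assign fixture maps directly onto the returned struct; a minimal pattern (fixture values illustrative only):

	m := newSegmentIndexMeta(nil)
	m.indexes = map[UniqueID]map[UniqueID]*model.Index{
		collID: {indexID: {CollectionID: collID, IndexID: indexID}},
	}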
func TestMeta_CreateIndex(t *testing.T) { func TestMeta_CreateIndex(t *testing.T) {
indexParams := []*commonpb.KeyValuePair{ indexParams := []*commonpb.KeyValuePair{
{ {
@ -274,14 +306,7 @@ func TestMeta_CreateIndex(t *testing.T) {
mock.Anything, mock.Anything,
).Return(nil) ).Return(nil)
m := &meta{ m := newSegmentIndexMeta(sc)
RWMutex: sync.RWMutex{},
ctx: context.Background(),
catalog: sc,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
}
err := m.CreateIndex(index) err := m.CreateIndex(index)
assert.NoError(t, err) assert.NoError(t, err)
}) })
@ -293,14 +318,7 @@ func TestMeta_CreateIndex(t *testing.T) {
mock.Anything, mock.Anything,
).Return(errors.New("fail")) ).Return(errors.New("fail"))
m := &meta{ m := newSegmentIndexMeta(ec)
RWMutex: sync.RWMutex{},
ctx: context.Background(),
catalog: ec,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
}
err := m.CreateIndex(index) err := m.CreateIndex(index)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -319,25 +337,9 @@ func TestMeta_AddSegmentIndex(t *testing.T) {
mock.Anything, mock.Anything,
).Return(errors.New("fail")) ).Return(errors.New("fail"))
m := &meta{ m := newSegmentIndexMeta(ec)
RWMutex: sync.RWMutex{}, m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
ctx: context.Background(), 1: make(map[UniqueID]*model.SegmentIndex, 0),
catalog: ec,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
1: {
SegmentInfo: nil,
segmentIndexes: map[UniqueID]*model.SegmentIndex{},
currRows: 0,
allocations: nil,
lastFlushTime: time.Time{},
isCompacting: false,
lastWrittenTime: time.Time{},
},
},
},
} }
segmentIndex := &model.SegmentIndex{ segmentIndex := &model.SegmentIndex{
@ -393,14 +395,8 @@ func TestMeta_GetIndexIDByName(t *testing.T) {
metakv.EXPECT().Save(mock.Anything, mock.Anything).Return(errors.New("failed")).Maybe() metakv.EXPECT().Save(mock.Anything, mock.Anything).Return(errors.New("failed")).Maybe()
metakv.EXPECT().MultiSave(mock.Anything).Return(errors.New("failed")).Maybe() metakv.EXPECT().MultiSave(mock.Anything).Return(errors.New("failed")).Maybe()
metakv.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, nil).Maybe() metakv.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, nil).Maybe()
m := &meta{
RWMutex: sync.RWMutex{},
ctx: context.Background(),
catalog: &datacoord.Catalog{MetaKv: metakv},
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
}
m := newSegmentIndexMeta(&datacoord.Catalog{MetaKv: metakv})
t.Run("no indexes", func(t *testing.T) { t.Run("no indexes", func(t *testing.T) {
indexID2CreateTS := m.GetIndexIDByName(collID, indexName) indexID2CreateTS := m.GetIndexIDByName(collID, indexName)
assert.Equal(t, 0, len(indexID2CreateTS)) assert.Equal(t, 0, len(indexID2CreateTS))
@ -454,25 +450,10 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
metakv.EXPECT().Save(mock.Anything, mock.Anything).Return(errors.New("failed")).Maybe() metakv.EXPECT().Save(mock.Anything, mock.Anything).Return(errors.New("failed")).Maybe()
metakv.EXPECT().MultiSave(mock.Anything).Return(errors.New("failed")).Maybe() metakv.EXPECT().MultiSave(mock.Anything).Return(errors.New("failed")).Maybe()
metakv.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, nil).Maybe() metakv.EXPECT().LoadWithPrefix(mock.Anything).Return(nil, nil, nil).Maybe()
m := &meta{
RWMutex: sync.RWMutex{}, m := newSegmentIndexMeta(&datacoord.Catalog{MetaKv: metakv})
ctx: context.Background(), m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
catalog: &datacoord.Catalog{MetaKv: metakv}, segID: make(map[UniqueID]*model.SegmentIndex, 0),
indexes: map[UniqueID]map[UniqueID]*model.Index{},
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: nil,
segmentIndexes: map[UniqueID]*model.SegmentIndex{},
currRows: 0,
allocations: nil,
lastFlushTime: time.Time{},
isCompacting: false,
lastWrittenTime: time.Time{},
},
},
},
} }
t.Run("collection has no index", func(t *testing.T) { t.Run("collection has no index", func(t *testing.T) {
@ -503,12 +484,12 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
t.Run("segment not exist", func(t *testing.T) { t.Run("segment not exist", func(t *testing.T) {
state := m.GetSegmentIndexState(collID, segID+1, indexID) state := m.GetSegmentIndexState(collID, segID+1, indexID)
assert.Equal(t, commonpb.IndexState_IndexStateNone, state.GetState()) assert.Equal(t, commonpb.IndexState_Unissued, state.GetState())
assert.Contains(t, state.FailReason, "segment is not exist with ID") assert.Contains(t, state.FailReason, "segment index not exist with ID")
}) })
t.Run("unissued", func(t *testing.T) { t.Run("unissued", func(t *testing.T) {
m.segments.SetSegmentIndex(segID, &model.SegmentIndex{ m.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
PartitionID: partID, PartitionID: partID,
@ -530,7 +511,7 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
}) })
t.Run("finish", func(t *testing.T) { t.Run("finish", func(t *testing.T) {
m.segments.SetSegmentIndex(segID, &model.SegmentIndex{ m.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
PartitionID: partID, PartitionID: partID,
@ -574,16 +555,10 @@ func TestMeta_GetSegmentIndexStateOnField(t *testing.T) {
}, },
} }
) )
m := &meta{
RWMutex: sync.RWMutex{}, m := newSegmentIndexMeta(nil)
ctx: context.Background(), m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
catalog: nil,
collections: nil,
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
segID: { segID: {
SegmentInfo: &datapb.SegmentInfo{},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: { indexID: {
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
@ -601,12 +576,8 @@ func TestMeta_GetSegmentIndexStateOnField(t *testing.T) {
IndexSize: 0, IndexSize: 0,
}, },
}, },
}, }
}, m.indexes = map[UniqueID]map[UniqueID]*model.Index{
},
channelCPs: nil,
chunkManager: nil,
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -622,8 +593,8 @@ func TestMeta_GetSegmentIndexStateOnField(t *testing.T) {
UserIndexParams: indexParams, UserIndexParams: indexParams,
}, },
}, },
}, }
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{ m.buildID2SegmentIndex = map[UniqueID]*model.SegmentIndex{
buildID: { buildID: {
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
@ -640,7 +611,6 @@ func TestMeta_GetSegmentIndexStateOnField(t *testing.T) {
IndexFileKeys: nil, IndexFileKeys: nil,
IndexSize: 0, IndexSize: 0,
}, },
},
} }
t.Run("success", func(t *testing.T) { t.Run("success", func(t *testing.T) {
@ -660,7 +630,7 @@ func TestMeta_GetSegmentIndexStateOnField(t *testing.T) {
t.Run("segment not exist", func(t *testing.T) { t.Run("segment not exist", func(t *testing.T) {
state := m.GetSegmentIndexStateOnField(collID, segID+1, fieldID) state := m.GetSegmentIndexStateOnField(collID, segID+1, fieldID)
assert.Equal(t, commonpb.IndexState_IndexStateNone, state.GetState()) assert.Equal(t, commonpb.IndexState_Unissued, state.GetState())
}) })
} }
@ -676,9 +646,8 @@ func TestMeta_MarkIndexAsDeleted(t *testing.T) {
mock.Anything, mock.Anything,
).Return(errors.New("fail")) ).Return(errors.New("fail"))
m := &meta{ m := newSegmentIndexMeta(sc)
catalog: sc, m.indexes = map[UniqueID]map[UniqueID]*model.Index{
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -707,7 +676,6 @@ func TestMeta_MarkIndexAsDeleted(t *testing.T) {
UserIndexParams: nil, UserIndexParams: nil,
}, },
}, },
},
} }
t.Run("fail", func(t *testing.T) { t.Run("fail", func(t *testing.T) {
@ -733,42 +701,44 @@ func TestMeta_GetSegmentIndexes(t *testing.T) {
m := createMetaTable(&datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)}) m := createMetaTable(&datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)})
t.Run("success", func(t *testing.T) { t.Run("success", func(t *testing.T) {
segIndexes := m.GetSegmentIndexes(segID) segIndexes := m.indexMeta.getSegmentIndexes(segID)
assert.Equal(t, 1, len(segIndexes)) assert.Equal(t, 1, len(segIndexes))
}) })
t.Run("segment not exist", func(t *testing.T) { t.Run("segment not exist", func(t *testing.T) {
segIndexes := m.GetSegmentIndexes(segID + 100) segIndexes := m.indexMeta.getSegmentIndexes(segID + 100)
assert.Equal(t, 0, len(segIndexes)) assert.Equal(t, 0, len(segIndexes))
}) })
t.Run("no index exist", func(t *testing.T) { t.Run("no index exist- segment index empty", func(t *testing.T) {
m = &meta{ m := newSegmentIndexMeta(nil)
RWMutex: sync.RWMutex{}, segIndexes := m.GetSegmentIndexes(collID, segID)
segments: &SegmentsInfo{ assert.Equal(t, 0, len(segIndexes))
segments: map[UniqueID]*SegmentInfo{ })
t.Run("no index exist- field index empty", func(t *testing.T) {
m := newSegmentIndexMeta(nil)
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
1: {
1: &model.SegmentIndex{},
},
}
segIndexes := m.GetSegmentIndexes(collID, 1)
assert.Equal(t, 0, len(segIndexes))
})
t.Run("index exists", func(t *testing.T) {
m := &indexMeta{
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: { segID: {
SegmentInfo: &datapb.SegmentInfo{ indexID: &model.SegmentIndex{
ID: segID,
CollectionID: collID, CollectionID: collID,
PartitionID: partID, SegmentID: segID,
NumOfRows: 0, IndexID: indexID,
State: commonpb.SegmentState_Flushed, IndexState: commonpb.IndexState_Finished,
}, },
}, },
}, },
},
indexes: nil,
buildID2SegmentIndex: nil,
}
segIndexes := m.GetSegmentIndexes(segID)
assert.Equal(t, 0, len(segIndexes))
})
}
func TestMeta_GetFieldIDByIndexID(t *testing.T) {
m := &meta{
indexes: map[UniqueID]map[UniqueID]*model.Index{ indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
@ -787,6 +757,34 @@ func TestMeta_GetFieldIDByIndexID(t *testing.T) {
}, },
}, },
} }
segIndexes := m.GetSegmentIndexes(collID, segID)
assert.Equal(t, 1, len(segIndexes))
segIdx, ok := segIndexes[indexID]
assert.True(t, ok)
assert.NotNil(t, segIdx)
})
}
func TestMeta_GetFieldIDByIndexID(t *testing.T) {
m := newSegmentIndexMeta(nil)
m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: {
indexID: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
IndexName: indexName,
IsDeleted: false,
CreateTime: 0,
TypeParams: nil,
IndexParams: nil,
IsAutoIndex: false,
UserIndexParams: nil,
},
},
}
t.Run("success", func(t *testing.T) { t.Run("success", func(t *testing.T) {
fID := m.GetFieldIDByIndexID(collID, indexID) fID := m.GetFieldIDByIndexID(collID, indexID)
@ -800,8 +798,8 @@ func TestMeta_GetFieldIDByIndexID(t *testing.T) {
} }
func TestMeta_GetIndexNameByID(t *testing.T) { func TestMeta_GetIndexNameByID(t *testing.T) {
m := &meta{ m := newSegmentIndexMeta(nil)
indexes: map[UniqueID]map[UniqueID]*model.Index{ m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -817,7 +815,6 @@ func TestMeta_GetIndexNameByID(t *testing.T) {
UserIndexParams: nil, UserIndexParams: nil,
}, },
}, },
},
} }
t.Run("success", func(t *testing.T) { t.Run("success", func(t *testing.T) {
@ -838,8 +835,9 @@ func TestMeta_GetTypeParams(t *testing.T) {
Value: "HNSW", Value: "HNSW",
}, },
} }
m := &meta{
indexes: map[UniqueID]map[UniqueID]*model.Index{ m := newSegmentIndexMeta(nil)
m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -860,7 +858,6 @@ func TestMeta_GetTypeParams(t *testing.T) {
UserIndexParams: indexParams, UserIndexParams: indexParams,
}, },
}, },
},
} }
t.Run("success", func(t *testing.T) { t.Run("success", func(t *testing.T) {
@ -884,8 +881,9 @@ func TestMeta_GetIndexParams(t *testing.T) {
Value: "HNSW", Value: "HNSW",
}, },
} }
m := &meta{
indexes: map[UniqueID]map[UniqueID]*model.Index{ m := newSegmentIndexMeta(nil)
m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -906,7 +904,6 @@ func TestMeta_GetIndexParams(t *testing.T) {
UserIndexParams: indexParams, UserIndexParams: indexParams,
}, },
}, },
},
} }
t.Run("success", func(t *testing.T) { t.Run("success", func(t *testing.T) {
@ -924,8 +921,8 @@ func TestMeta_GetIndexParams(t *testing.T) {
} }
func TestMeta_GetIndexJob(t *testing.T) { func TestMeta_GetIndexJob(t *testing.T) {
m := &meta{ m := newSegmentIndexMeta(nil)
buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{ m.buildID2SegmentIndex = map[UniqueID]*model.SegmentIndex{
buildID: { buildID: {
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
@ -942,7 +939,6 @@ func TestMeta_GetIndexJob(t *testing.T) {
IndexFileKeys: nil, IndexFileKeys: nil,
IndexSize: 0, IndexSize: 0,
}, },
},
} }
t.Run("exist", func(t *testing.T) { t.Run("exist", func(t *testing.T) {
@ -959,8 +955,8 @@ func TestMeta_GetIndexJob(t *testing.T) {
} }
func TestMeta_IsIndexExist(t *testing.T) { func TestMeta_IsIndexExist(t *testing.T) {
m := &meta{ m := newSegmentIndexMeta(nil)
indexes: map[UniqueID]map[UniqueID]*model.Index{ m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -989,7 +985,6 @@ func TestMeta_IsIndexExist(t *testing.T) {
UserIndexParams: nil, UserIndexParams: nil,
}, },
}, },
},
} }
t.Run("exist", func(t *testing.T) { t.Run("exist", func(t *testing.T) {
@ -1009,27 +1004,17 @@ func TestMeta_IsIndexExist(t *testing.T) {
}) })
} }
func updateSegmentIndexMeta(t *testing.T) *meta { func updateSegmentIndexMeta(t *testing.T) *indexMeta {
sc := catalogmocks.NewDataCoordCatalog(t) sc := catalogmocks.NewDataCoordCatalog(t)
sc.On("AlterSegmentIndexes", sc.On("AlterSegmentIndexes",
mock.Anything, mock.Anything,
mock.Anything, mock.Anything,
).Return(nil) ).Return(nil)
return &meta{ return &indexMeta{
catalog: sc, catalog: sc,
segments: &SegmentsInfo{ segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segments: map[UniqueID]*SegmentInfo{
segID: { segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Flushed,
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: { indexID: {
SegmentID: segID, SegmentID: segID,
CollectionID: collID, CollectionID: collID,
@ -1048,8 +1033,6 @@ func updateSegmentIndexMeta(t *testing.T) *meta {
}, },
}, },
}, },
},
},
indexes: map[UniqueID]map[UniqueID]*model.Index{ indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
@ -1181,128 +1164,25 @@ func TestMeta_BuildIndex(t *testing.T) {
}) })
} }
func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
m := &meta{
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Flushed,
},
},
segID + 1: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID + 1,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Growing,
},
},
segID + 2: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID + 2,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Dropped,
},
},
},
},
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
indexID: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
IndexName: indexName,
IsDeleted: false,
CreateTime: 0,
TypeParams: nil,
IndexParams: nil,
IsAutoIndex: false,
UserIndexParams: nil,
},
indexID + 1: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID + 1,
IndexID: indexID + 1,
IndexName: indexName + "_1",
IsDeleted: false,
CreateTime: 0,
TypeParams: nil,
IndexParams: nil,
IsAutoIndex: false,
UserIndexParams: nil,
},
},
},
}
t.Run("normal", func(t *testing.T) {
segments := m.GetHasUnindexTaskSegments()
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)
})
t.Run("segment partial field with index", func(t *testing.T) {
m.segments.segments[segID].segmentIndexes = map[UniqueID]*model.SegmentIndex{
indexID: {
CollectionID: collID,
SegmentID: segID,
IndexID: indexID,
IndexState: commonpb.IndexState_Finished,
},
}
segments := m.GetHasUnindexTaskSegments()
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)
})
t.Run("segment all vector field with index", func(t *testing.T) {
m.segments.segments[segID].segmentIndexes[indexID+1] = &model.SegmentIndex{
CollectionID: collID,
SegmentID: segID,
IndexID: indexID + 1,
IndexState: commonpb.IndexState_Finished,
}
segments := m.GetHasUnindexTaskSegments()
assert.Equal(t, 0, len(segments))
})
}
// see also: https://github.com/milvus-io/milvus/issues/21660 // see also: https://github.com/milvus-io/milvus/issues/21660
func TestUpdateSegmentIndexNotExists(t *testing.T) { func TestUpdateSegmentIndexNotExists(t *testing.T) {
m := &meta{ m := newSegmentIndexMeta(nil)
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{},
},
indexes: map[UniqueID]map[UniqueID]*model.Index{},
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
}
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
m.updateSegmentIndex(&model.SegmentIndex{ m.updateSegmentIndex(&model.SegmentIndex{
SegmentID: 1, SegmentID: 1,
IndexID: 2, IndexID: 2,
}) })
}) })
assert.Equal(t, 1, len(m.segmentIndexes))
segmentIdx := m.segmentIndexes[1]
assert.Equal(t, 1, len(segmentIdx))
_, ok := segmentIdx[2]
assert.True(t, ok)
} }
func TestMeta_DeleteTask_Error(t *testing.T) { func TestMeta_DeleteTask_Error(t *testing.T) {
m := &meta{buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex)} m := newSegmentIndexMeta(nil)
t.Run("segment index not found", func(t *testing.T) { t.Run("segment index not found", func(t *testing.T) {
err := m.DeleteTask(buildID) err := m.DeleteTask(buildID)
assert.NoError(t, err) assert.NoError(t, err)
@ -1328,8 +1208,8 @@ func TestMeta_DeleteTask_Error(t *testing.T) {
} }
func TestMeta_GetFieldIndexes(t *testing.T) { func TestMeta_GetFieldIndexes(t *testing.T) {
m := &meta{ m := newSegmentIndexMeta(nil)
indexes: map[UniqueID]map[UniqueID]*model.Index{ m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: { collID: {
indexID: { indexID: {
TenantID: "", TenantID: "",
@ -1371,10 +1251,85 @@ func TestMeta_GetFieldIndexes(t *testing.T) {
UserIndexParams: nil, UserIndexParams: nil,
}, },
}, },
},
} }
indexes := m.GetFieldIndexes(collID, fieldID, "") indexes := m.GetFieldIndexes(collID, fieldID, "")
assert.Equal(t, 1, len(indexes)) assert.Equal(t, 1, len(indexes))
assert.Equal(t, indexName, indexes[0].IndexName) assert.Equal(t, indexName, indexes[0].IndexName)
} }
func TestRemoveIndex(t *testing.T) {
t.Run("drop index fail", func(t *testing.T) {
expectedErr := errors.New("error")
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().
DropIndex(mock.Anything, mock.Anything, mock.Anything).
Return(expectedErr)
m := newSegmentIndexMeta(catalog)
err := m.RemoveIndex(collID, indexID)
assert.Error(t, err)
assert.EqualError(t, err, "error")
})
t.Run("remove index ok", func(t *testing.T) {
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().
DropIndex(mock.Anything, mock.Anything, mock.Anything).
Return(nil)
m := &indexMeta{
catalog: catalog,
indexes: map[int64]map[int64]*model.Index{
collID: {
indexID: &model.Index{},
},
},
}
err := m.RemoveIndex(collID, indexID)
assert.NoError(t, err)
assert.Equal(t, len(m.indexes), 0)
})
}
func TestRemoveSegmentIndex(t *testing.T) {
t.Run("drop segment index fail", func(t *testing.T) {
expectedErr := errors.New("error")
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().
DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(expectedErr)
m := newSegmentIndexMeta(catalog)
err := m.RemoveSegmentIndex(0, 0, 0, 0, 0)
assert.Error(t, err)
assert.EqualError(t, err, "error")
})
t.Run("remove segment index ok", func(t *testing.T) {
catalog := catalogmocks.NewDataCoordCatalog(t)
catalog.EXPECT().
DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(nil)
m := &indexMeta{
catalog: catalog,
segmentIndexes: map[int64]map[int64]*model.SegmentIndex{
segID: {
indexID: &model.SegmentIndex{},
},
},
buildID2SegmentIndex: map[int64]*model.SegmentIndex{
buildID: {},
},
}
err := m.RemoveSegmentIndex(collID, partID, segID, indexID, buildID)
assert.NoError(t, err)
assert.Equal(t, len(m.segmentIndexes), 0)
assert.Equal(t, len(m.buildID2SegmentIndex), 0)
})
}

View File

@ -70,7 +70,7 @@ func (s *Server) createIndexForSegment(segment *SegmentInfo, indexID UniqueID) e
CreateTime: uint64(segment.ID),
WriteHandoff: false,
}
- if err = s.meta.AddSegmentIndex(segIndex); err != nil {
+ if err = s.meta.indexMeta.AddSegmentIndex(segIndex); err != nil {
return err
}
s.indexBuilder.enqueue(buildID)
@ -78,9 +78,10 @@ func (s *Server) createIndexForSegment(segment *SegmentInfo, indexID UniqueID) e
}
func (s *Server) createIndexesForSegment(segment *SegmentInfo) error {
- indexes := s.meta.GetIndexesForCollection(segment.CollectionID, "")
+ indexes := s.meta.indexMeta.GetIndexesForCollection(segment.CollectionID, "")
+ indexIDToSegIndexes := s.meta.indexMeta.GetSegmentIndexes(segment.CollectionID, segment.ID)
for _, index := range indexes {
- if _, ok := segment.segmentIndexes[index.IndexID]; !ok {
+ if _, ok := indexIDToSegIndexes[index.IndexID]; !ok {
if err := s.createIndexForSegment(segment, index.IndexID); err != nil {
log.Warn("create index for segment fail", zap.Int64("segmentID", segment.ID),
zap.Int64("indexID", index.IndexID))
@ -91,6 +92,20 @@ func (s *Server) createIndexesForSegment(segment *SegmentInfo) error {
return nil
}
func (s *Server) getUnIndexTaskSegments() []*SegmentInfo {
flushedSegments := s.meta.SelectSegments(func(seg *SegmentInfo) bool {
return isFlush(seg)
})
unindexedSegments := make([]*SegmentInfo, 0)
for _, segment := range flushedSegments {
if s.meta.indexMeta.IsUnIndexedSegment(segment.CollectionID, segment.GetID()) {
unindexedSegments = append(unindexedSegments, segment)
}
}
return unindexedSegments
}
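getUnIndexTaskSegments now asks indexMeta, not the segment object, whether build tasks are missing. The TestMeta_GetHasUnindexTaskSegments cases later in this diff imply the predicate: a segment counts as unindexed while any live index on its collection still lacks a segment-index record. A sketch under that assumption (illustrative only):

func (m *indexMeta) IsUnIndexedSegment(collectionID, segID UniqueID) bool {
	m.RLock()
	defer m.RUnlock()
	for indexID, index := range m.indexes[collectionID] {
		if index.IsDeleted {
			continue
		}
		// A missing record for a live index means a build task is still owed.
		if _, ok := m.segmentIndexes[segID][indexID]; !ok {
			return true
		}
	}
	return false
}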
func (s *Server) createIndexForSegmentLoop(ctx context.Context) {
log.Info("start create index for segment loop...")
defer s.serverLoopWg.Done()
@ -103,7 +118,7 @@ func (s *Server) createIndexForSegmentLoop(ctx context.Context) {
log.Warn("DataCoord context done, exit...") log.Warn("DataCoord context done, exit...")
return return
case <-ticker.C: case <-ticker.C:
segments := s.meta.GetHasUnindexTaskSegments() segments := s.getUnIndexTaskSegments()
for _, segment := range segments { for _, segment := range segments {
if err := s.createIndexesForSegment(segment); err != nil { if err := s.createIndexesForSegment(segment); err != nil {
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID)) log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
@ -171,7 +186,7 @@ func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexReques
metrics.IndexRequestCounter.WithLabelValues(metrics.TotalLabel).Inc()
if req.GetIndexName() == "" {
- indexes := s.meta.GetFieldIndexes(req.GetCollectionID(), req.GetFieldID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetFieldIndexes(req.GetCollectionID(), req.GetFieldID(), req.GetIndexName())
if len(indexes) == 0 {
fieldName, err := s.getFieldNameByID(ctx, req.GetCollectionID(), req.GetFieldID())
if err != nil {
@ -184,7 +199,7 @@ func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexReques
}
}
- indexID, err := s.meta.CanCreateIndex(req)
+ indexID, err := s.meta.indexMeta.CanCreateIndex(req)
if err != nil {
metrics.IndexRequestCounter.WithLabelValues(metrics.FailLabel).Inc()
return merr.Status(err), nil
@ -219,8 +234,7 @@ func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexReques
}
// Get flushed segments and create index
- err = s.meta.CreateIndex(index)
+ err = s.meta.indexMeta.CreateIndex(index)
if err != nil {
log.Error("CreateIndex fail",
zap.Int64("fieldID", req.GetFieldID()), zap.String("indexName", req.GetIndexName()), zap.Error(err))
@ -290,11 +304,12 @@ func (s *Server) AlterIndex(ctx context.Context, req *indexpb.AlterIndexRequest)
return merr.Status(err), nil
}
- indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if req.GetIndexName() != "" && len(indexes) == 0 {
err := merr.WrapErrIndexNotFound(req.GetIndexName())
return merr.Status(err), nil
}
for _, index := range indexes {
// update user index params
newUserIndexParams, err := UpdateParams(index, index.UserIndexParams, req.GetParams())
@ -319,7 +334,7 @@ func (s *Server) AlterIndex(ctx context.Context, req *indexpb.AlterIndexRequest)
index.IndexParams = newIndexParams
}
- err := s.meta.AlterIndex(ctx, indexes...)
+ err := s.meta.indexMeta.AlterIndex(ctx, indexes...)
if err != nil {
log.Warn("failed to alter index", zap.Error(err))
return merr.Status(err), nil
@ -344,7 +359,7 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe
}, nil
}
- indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if len(indexes) == 0 {
err := merr.WrapErrIndexNotFound(req.GetIndexName())
log.Warn("GetIndexState fail", zap.Error(err))
@ -366,7 +381,7 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe
indexInfo := &indexpb.IndexInfo{}
// The total rows of all indexes should be based on the current perspective
- segments := s.meta.SelectSegmentIndexes(func(info *SegmentInfo) bool {
+ segments := s.selectSegmentIndexes(func(info *SegmentInfo) bool {
return info.GetCollectionID() == req.GetCollectionID() && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
})
@ -400,7 +415,7 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
Status: merr.Success(),
States: make([]*indexpb.SegmentIndexState, 0),
}
- indexID2CreateTs := s.meta.GetIndexIDByName(req.GetCollectionID(), req.GetIndexName())
+ indexID2CreateTs := s.meta.indexMeta.GetIndexIDByName(req.GetCollectionID(), req.GetIndexName())
if len(indexID2CreateTs) == 0 {
err := merr.WrapErrIndexNotFound(req.GetIndexName())
log.Warn("GetSegmentIndexState fail", zap.String("indexName", req.GetIndexName()), zap.Error(err))
@ -410,7 +425,7 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
}
for _, segID := range req.GetSegmentIDs() {
for indexID := range indexID2CreateTs {
- state := s.meta.GetSegmentIndexState(req.GetCollectionID(), segID, indexID)
+ state := s.meta.indexMeta.GetSegmentIndexState(req.GetCollectionID(), segID, indexID)
ret.States = append(ret.States, state)
}
}
@ -418,6 +433,32 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
return ret, nil
}
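Each (segment, index) pair above is resolved inside indexMeta under its own read lock. One plausible shape for that lookup, assuming an Unissued default when no build record exists yet (a sketch, not the verbatim implementation):

func (m *indexMeta) GetSegmentIndexState(collID, segID, indexID UniqueID) *indexpb.SegmentIndexState {
	m.RLock()
	defer m.RUnlock()
	if segIdx, ok := m.segmentIndexes[segID][indexID]; ok {
		return &indexpb.SegmentIndexState{
			SegmentID:  segID,
			State:      segIdx.IndexState,
			FailReason: segIdx.FailReason,
		}
	}
	// No build record yet: the task has not been issued.
	return &indexpb.SegmentIndexState{SegmentID: segID, State: commonpb.IndexState_Unissued}
}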
func (s *Server) selectSegmentIndexes(selector SegmentInfoSelector) map[int64]*indexStats {
ret := make(map[int64]*indexStats)
for _, info := range s.meta.SelectSegments(selector) {
is := &indexStats{
ID: info.GetID(),
numRows: info.GetNumOfRows(),
compactionFrom: info.GetCompactionFrom(),
indexStates: make(map[int64]*indexpb.SegmentIndexState),
state: info.GetState(),
lastExpireTime: info.GetLastExpireTime(),
}
indexIDToSegIdxes := s.meta.indexMeta.GetSegmentIndexes(info.GetCollectionID(), info.GetID())
for indexID, segIndex := range indexIDToSegIdxes {
is.indexStates[indexID] = &indexpb.SegmentIndexState{
SegmentID: segIndex.SegmentID,
State: segIndex.IndexState,
FailReason: segIndex.FailReason,
}
}
ret[info.GetID()] = is
}
return ret
}
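selectSegmentIndexes replaces the old meta.SelectSegmentIndexes (removed further down in this commit) and joins segment info from meta with per-segment index state from indexMeta. The indexStats type it fills is not shown in this excerpt; its shape can be inferred from the assignments above (field types are assumptions based on the getters used):

type indexStats struct {
	ID             int64
	numRows        int64
	compactionFrom []int64
	indexStates    map[int64]*indexpb.SegmentIndexState
	state          commonpb.SegmentState
	lastExpireTime uint64
}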
func (s *Server) countIndexedRows(indexInfo *indexpb.IndexInfo, segments map[int64]*indexStats) int64 {
unIndexed, indexed := typeutil.NewSet[int64](), typeutil.NewSet[int64]()
for segID, seg := range segments {
@ -566,7 +607,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
}, nil
}
- indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if len(indexes) == 0 {
err := merr.WrapErrIndexNotFound(req.GetIndexName())
log.Warn("GetIndexBuildProgress fail", zap.String("indexName", req.IndexName), zap.Error(err))
@ -592,7 +633,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
}
// The total rows of all indexes should be based on the current perspective
- segments := s.meta.SelectSegmentIndexes(func(info *SegmentInfo) bool {
+ segments := s.selectSegmentIndexes(func(info *SegmentInfo) bool {
return info.GetCollectionID() == req.GetCollectionID() && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
})
@ -635,7 +676,7 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
}, nil
}
- indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if len(indexes) == 0 {
err := merr.WrapErrIndexNotFound(req.GetIndexName())
log.Warn("DescribeIndex fail", zap.Error(err))
@ -645,7 +686,7 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
}
// The total rows of all indexes should be based on the current perspective
- segments := s.meta.SelectSegmentIndexes(func(info *SegmentInfo) bool {
+ segments := s.selectSegmentIndexes(func(info *SegmentInfo) bool {
return info.GetCollectionID() == req.GetCollectionID() && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
})
@ -692,7 +733,7 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
}, nil
}
- indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if len(indexes) == 0 {
err := merr.WrapErrIndexNotFound(req.GetIndexName())
log.Warn("GetIndexStatistics fail",
@ -704,7 +745,7 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
}
// The total rows of all indexes should be based on the current perspective
- segments := s.meta.SelectSegmentIndexes(func(info *SegmentInfo) bool {
+ segments := s.selectSegmentIndexes(func(info *SegmentInfo) bool {
return info.GetCollectionID() == req.GetCollectionID() && (isFlush(info) || info.GetState() == commonpb.SegmentState_Dropped)
})
@ -751,7 +792,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
return merr.Status(err), nil
}
- indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
+ indexes := s.meta.indexMeta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if len(indexes) == 0 {
log.Info(fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName))
return merr.Success(), nil
@ -770,7 +811,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
// from being dropped at the same time when dropping_partition in version 2.1
if len(req.GetPartitionIDs()) == 0 {
// drop collection index
- err := s.meta.MarkIndexAsDeleted(req.GetCollectionID(), indexIDs)
+ err := s.meta.indexMeta.MarkIndexAsDeleted(req.GetCollectionID(), indexIDs)
if err != nil {
log.Warn("DropIndex fail", zap.String("indexName", req.IndexName), zap.Error(err))
return merr.Status(err), nil
@ -800,7 +841,7 @@ func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoReq
}
for _, segID := range req.GetSegmentIDs() {
- segIdxes := s.meta.GetSegmentIndexes(segID)
+ segIdxes := s.meta.indexMeta.GetSegmentIndexes(req.GetCollectionID(), segID)
ret.SegmentInfo[segID] = &indexpb.SegmentInfo{
CollectionID: req.GetCollectionID(),
SegmentID: segID,
@ -813,15 +854,15 @@ func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoReq
if segIdx.IndexState == commonpb.IndexState_Finished {
indexFilePaths := metautil.BuildSegmentIndexFilePaths(s.meta.chunkManager.RootPath(), segIdx.BuildID, segIdx.IndexVersion,
segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexFileKeys)
- indexParams := s.meta.GetIndexParams(segIdx.CollectionID, segIdx.IndexID)
+ indexParams := s.meta.indexMeta.GetIndexParams(segIdx.CollectionID, segIdx.IndexID)
- indexParams = append(indexParams, s.meta.GetTypeParams(segIdx.CollectionID, segIdx.IndexID)...)
+ indexParams = append(indexParams, s.meta.indexMeta.GetTypeParams(segIdx.CollectionID, segIdx.IndexID)...)
ret.SegmentInfo[segID].IndexInfos = append(ret.SegmentInfo[segID].IndexInfos,
&indexpb.IndexFilePathInfo{
SegmentID: segID,
- FieldID: s.meta.GetFieldIDByIndexID(segIdx.CollectionID, segIdx.IndexID),
+ FieldID: s.meta.indexMeta.GetFieldIDByIndexID(segIdx.CollectionID, segIdx.IndexID),
IndexID: segIdx.IndexID,
BuildID: segIdx.BuildID,
- IndexName: s.meta.GetIndexNameByID(segIdx.CollectionID, segIdx.IndexID),
+ IndexName: s.meta.indexMeta.GetIndexNameByID(segIdx.CollectionID, segIdx.IndexID),
IndexParams: indexParams,
IndexFilePaths: indexFilePaths,
SerializedSize: segIdx.IndexSize,
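Note the signature change above: GetSegmentIndexes now also takes the collection ID, because index definitions are kept per collection inside indexMeta. A plausible sketch of the collection-scoped lookup (the filtering and the clone step are assumptions):

func (m *indexMeta) GetSegmentIndexes(collectionID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
	m.RLock()
	defer m.RUnlock()
	ret := make(map[UniqueID]*model.SegmentIndex)
	for indexID, segIdx := range m.segmentIndexes[segID] {
		// Only report records whose index still exists on this collection.
		if _, ok := m.indexes[collectionID][indexID]; ok {
			ret[indexID] = model.CloneSegmentIndex(segIdx)
		}
	}
	return ret
}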

View File

@ -81,10 +81,9 @@ func TestServer_CreateIndex(t *testing.T) {
)
catalog := catalogmocks.NewDataCoordCatalog(t)
- catalog.On("CreateIndex",
- mock.Anything,
- mock.Anything,
- ).Return(nil)
+ catalog.EXPECT().CreateIndex(mock.Anything, mock.Anything).Return(nil).Maybe()
+ indexMeta := newSegmentIndexMeta(catalog)
s := &Server{
meta: &meta{
catalog: catalog,
@ -98,7 +97,7 @@ func TestServer_CreateIndex(t *testing.T) {
CreatedAt: 0,
},
},
- indexes: map[UniqueID]map[UniqueID]*model.Index{},
+ indexMeta: indexMeta,
},
allocator: newMockAllocator(),
notifyIndexChan: make(chan UniqueID, 1),
@ -110,6 +109,7 @@ func TestServer_CreateIndex(t *testing.T) {
t.Run("get field name failed", func(t *testing.T) { t.Run("get field name failed", func(t *testing.T) {
b.EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error")) b.EXPECT().DescribeCollectionInternal(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
s.broker = broker.NewCoordinatorBroker(b) s.broker = broker.NewCoordinatorBroker(b)
resp, err := s.CreateIndex(ctx, req) resp, err := s.CreateIndex(ctx, req)
assert.NoError(t, err) assert.NoError(t, err)
@ -199,7 +199,7 @@ func TestServer_CreateIndex(t *testing.T) {
t.Run("alloc ID fail", func(t *testing.T) { t.Run("alloc ID fail", func(t *testing.T) {
req.FieldID = fieldID req.FieldID = fieldID
s.allocator = &FailsAllocator{allocIDSucceed: false} s.allocator = &FailsAllocator{allocIDSucceed: false}
s.meta.indexes = map[UniqueID]map[UniqueID]*model.Index{} s.meta.indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{}
resp, err := s.CreateIndex(ctx, req) resp, err := s.CreateIndex(ctx, req)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetErrorCode()) assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetErrorCode())
@ -207,7 +207,7 @@ func TestServer_CreateIndex(t *testing.T) {
t.Run("not support disk index", func(t *testing.T) { t.Run("not support disk index", func(t *testing.T) {
s.allocator = newMockAllocator() s.allocator = newMockAllocator()
s.meta.indexes = map[UniqueID]map[UniqueID]*model.Index{} s.meta.indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{}
req.IndexParams = []*commonpb.KeyValuePair{ req.IndexParams = []*commonpb.KeyValuePair{
{ {
Key: common.IndexTypeKey, Key: common.IndexTypeKey,
@ -224,8 +224,9 @@ func TestServer_CreateIndex(t *testing.T) {
metakv := mockkv.NewMetaKv(t)
metakv.EXPECT().Save(mock.Anything, mock.Anything).Return(errors.New("failed")).Maybe()
metakv.EXPECT().MultiSave(mock.Anything).Return(errors.New("failed")).Maybe()
- s.meta.indexes = map[UniqueID]map[UniqueID]*model.Index{}
+ s.meta.indexMeta.indexes = map[UniqueID]map[UniqueID]*model.Index{}
s.meta.catalog = &datacoord.Catalog{MetaKv: metakv}
+ s.meta.indexMeta.catalog = s.meta.catalog
req.IndexParams = []*commonpb.KeyValuePair{
{
Key: common.IndexTypeKey,
@ -278,8 +279,7 @@ func TestServer_AlterIndex(t *testing.T) {
mock.Anything,
).Return(nil)
- s := &Server{
- meta: &meta{
+ indexMeta := &indexMeta{
catalog: catalog,
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
@ -369,38 +369,8 @@ func TestServer_AlterIndex(t *testing.T) {
},
},
},
- segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
+ segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
invalidSegID: {
SegmentInfo: &datapb.SegmentInfo{
ID: invalidSegID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
// timestamp > index start time, will be filtered out
Timestamp: createTS + 1,
},
},
},
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
CreatedByCompaction: true,
CompactionFrom: []int64{segID - 1},
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID,
CollectionID: collID,
@ -487,21 +457,7 @@ func TestServer_AlterIndex(t *testing.T) {
WriteHandoff: false,
},
},
},
segID - 1: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Dropped,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID - 1,
CollectionID: collID,
@ -565,6 +521,58 @@ func TestServer_AlterIndex(t *testing.T) {
},
},
},
}
s := &Server{
meta: &meta{
catalog: catalog,
indexMeta: indexMeta,
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
invalidSegID: {
SegmentInfo: &datapb.SegmentInfo{
ID: invalidSegID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
// timestamp > index start time, will be filtered out
Timestamp: createTS + 1,
},
},
},
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
CreatedByCompaction: true,
CompactionFrom: []int64{segID - 1},
},
},
segID - 1: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Dropped,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
},
}},
},
allocator: newMockAllocator(),
@ -642,6 +650,7 @@ func TestServer_GetIndexState(t *testing.T) {
s := &Server{
meta: &meta{
catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
indexMeta: newSegmentIndexMeta(&datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)}),
},
allocator: newMockAllocator(),
notifyIndexChan: make(chan UniqueID, 1),
@ -663,56 +672,7 @@ func TestServer_GetIndexState(t *testing.T) {
s.meta = &meta{
catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
- indexes: map[UniqueID]map[UniqueID]*model.Index{
+ indexMeta: &indexMeta{
collID: {
indexID: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
IndexName: indexName,
IsDeleted: false,
CreateTime: createTS,
TypeParams: typeParams,
IndexParams: indexParams,
IsAutoIndex: false,
UserIndexParams: nil,
},
},
},
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 10250,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS - 1,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS - 1,
},
},
segmentIndexes: nil,
currRows: 0,
allocations: nil,
lastFlushTime: time.Time{},
isCompacting: false,
lastWrittenTime: time.Time{},
},
}},
}
t.Run("index state is unissued", func(t *testing.T) {
resp, err := s.GetIndexState(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, commonpb.IndexState_InProgress, resp.GetState())
})
s.meta = &meta{
catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
@ -731,6 +691,9 @@ func TestServer_GetIndexState(t *testing.T) {
},
},
},
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
},
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
@ -746,7 +709,44 @@ func TestServer_GetIndexState(t *testing.T) {
Timestamp: createTS - 1,
},
},
- segmentIndexes: map[UniqueID]*model.SegmentIndex{
+ currRows: 0,
allocations: nil,
lastFlushTime: time.Time{},
isCompacting: false,
lastWrittenTime: time.Time{},
},
}},
}
t.Run("index state is unissued", func(t *testing.T) {
resp, err := s.GetIndexState(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, commonpb.IndexState_InProgress, resp.GetState())
})
s.meta = &meta{
catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
indexMeta: &indexMeta{
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
indexID: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
IndexName: indexName,
IsDeleted: false,
CreateTime: createTS,
TypeParams: typeParams,
IndexParams: indexParams,
IsAutoIndex: false,
UserIndexParams: nil,
},
},
},
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
SegmentID: segID,
CollectionID: collID,
@ -765,6 +765,24 @@ func TestServer_GetIndexState(t *testing.T) {
WriteHandoff: false,
},
},
},
},
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 10250,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS - 1,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS - 1,
},
},
currRows: 0,
allocations: nil,
lastFlushTime: time.Time{},
@ -782,7 +800,7 @@ func TestServer_GetIndexState(t *testing.T) {
})
t.Run("ambiguous index name", func(t *testing.T) {
- s.meta.indexes[collID][indexID+1] = &model.Index{
+ s.meta.indexMeta.indexes[collID][indexID+1] = &model.Index{
TenantID: "",
CollectionID: collID,
IndexID: indexID + 1,
@ -828,10 +846,13 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
SegmentIDs: []UniqueID{segID},
}
)
indexMeta := newSegmentIndexMeta(&datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)})
s := &Server{
meta: &meta{
- catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
+ catalog: indexMeta.catalog,
- indexes: map[UniqueID]map[UniqueID]*model.Index{},
+ indexMeta: indexMeta,
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{}},
},
allocator: newMockAllocator(),
@ -853,7 +874,7 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
})
t.Run("unfinished", func(t *testing.T) {
- s.meta.indexes[collID] = map[UniqueID]*model.Index{
+ s.meta.indexMeta.indexes[collID] = map[UniqueID]*model.Index{
indexID: {
TenantID: "",
CollectionID: collID,
@ -868,10 +889,7 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
UserIndexParams: nil,
},
}
- s.meta.segments.segments[segID] = &SegmentInfo{
- SegmentInfo: nil,
- segmentIndexes: map[UniqueID]*model.SegmentIndex{
- indexID: {
+ s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
@ -887,8 +905,9 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1025,
WriteHandoff: false,
- },
- },
+ })
+ s.meta.segments.segments[segID] = &SegmentInfo{
+ SegmentInfo: nil,
currRows: 0,
allocations: nil,
lastFlushTime: time.Time{},
@ -902,10 +921,7 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
})
t.Run("finish", func(t *testing.T) {
- s.meta.segments.segments[segID] = &SegmentInfo{
- SegmentInfo: nil,
- segmentIndexes: map[UniqueID]*model.SegmentIndex{
- indexID: {
+ s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
@ -921,14 +937,7 @@ func TestServer_GetSegmentIndexState(t *testing.T) {
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 1025,
WriteHandoff: false,
- },
- },
- currRows: 0,
- allocations: nil,
- lastFlushTime: time.Time{},
- isCompacting: false,
- lastWrittenTime: time.Time{},
- }
+ })
resp, err := s.GetSegmentIndexState(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
@ -966,7 +975,7 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
s := &Server{
meta: &meta{
catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
- indexes: map[UniqueID]map[UniqueID]*model.Index{},
+ indexMeta: newSegmentIndexMeta(&datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)}),
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{}},
},
allocator: newMockAllocator(),
@ -987,7 +996,7 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
})
t.Run("unissued", func(t *testing.T) {
- s.meta.indexes[collID] = map[UniqueID]*model.Index{
+ s.meta.indexMeta.indexes[collID] = map[UniqueID]*model.Index{
indexID: {
TenantID: "",
CollectionID: collID,
@ -1018,7 +1027,6 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
Timestamp: createTS,
},
},
- segmentIndexes: nil,
currRows: 10250,
allocations: nil,
lastFlushTime: time.Time{},
@ -1036,24 +1044,7 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
})
t.Run("finish", func(t *testing.T) {
- s.meta.segments = &SegmentsInfo{
+ s.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
segments: map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 10250,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
@ -1069,6 +1060,21 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
IndexFileKeys: []string{"file1", "file2"},
IndexSize: 0,
WriteHandoff: false,
})
s.meta.segments = &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 10250,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
currRows: 10250,
@ -1088,7 +1094,7 @@ func TestServer_GetIndexBuildProgress(t *testing.T) {
})
t.Run("multiple index", func(t *testing.T) {
- s.meta.indexes[collID] = map[UniqueID]*model.Index{
+ s.meta.indexMeta.indexes[collID] = map[UniqueID]*model.Index{
indexID: {
TenantID: "",
CollectionID: collID,
@ -1161,6 +1167,8 @@ func TestServer_DescribeIndex(t *testing.T) {
s := &Server{
meta: &meta{
+ catalog: catalog,
+ indexMeta: &indexMeta{
catalog: catalog,
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
@ -1250,38 +1258,8 @@ func TestServer_DescribeIndex(t *testing.T) {
},
},
},
- segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
+ segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
invalidSegID: {
SegmentInfo: &datapb.SegmentInfo{
ID: invalidSegID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
// timestamp > index start time, will be filtered out
Timestamp: createTS + 1,
},
},
},
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
CreatedByCompaction: true,
CompactionFrom: []int64{segID - 1},
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID,
CollectionID: collID,
@ -1368,21 +1346,7 @@ func TestServer_DescribeIndex(t *testing.T) {
WriteHandoff: false,
},
},
},
segID - 1: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Dropped,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID - 1,
CollectionID: collID,
@ -1396,7 +1360,7 @@ func TestServer_DescribeIndex(t *testing.T) {
CreateTime: createTS,
},
indexID + 1: {
- SegmentID: segID,
+ SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
@ -1408,7 +1372,7 @@ func TestServer_DescribeIndex(t *testing.T) {
CreateTime: createTS,
},
indexID + 3: {
- SegmentID: segID,
+ SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
@ -1420,7 +1384,7 @@ func TestServer_DescribeIndex(t *testing.T) {
CreateTime: createTS,
},
indexID + 4: {
- SegmentID: segID,
+ SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
@ -1433,7 +1397,7 @@ func TestServer_DescribeIndex(t *testing.T) {
CreateTime: createTS,
},
indexID + 5: {
- SegmentID: segID,
+ SegmentID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumRows: 10000,
@ -1446,6 +1410,54 @@ func TestServer_DescribeIndex(t *testing.T) {
},
},
},
},
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
invalidSegID: {
SegmentInfo: &datapb.SegmentInfo{
ID: invalidSegID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
// timestamp > index start time, will be filtered out
Timestamp: createTS + 1,
},
},
},
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
CreatedByCompaction: true,
CompactionFrom: []int64{segID - 1},
},
},
segID - 1: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID - 1,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Dropped,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
},
}},
},
allocator: newMockAllocator(),
@ -1522,6 +1534,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
s := &Server{
meta: &meta{
+ catalog: catalog,
+ indexMeta: &indexMeta{
catalog: catalog,
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
@ -1611,36 +1625,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
},
},
},
- segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
+ segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
invalidSegID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
// timestamp > index start time, will be filtered out
Timestamp: createTS + 1,
},
},
},
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID,
CollectionID: collID,
@ -1728,6 +1714,38 @@ func TestServer_GetIndexStatistics(t *testing.T) {
},
},
},
},
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
invalidSegID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
// timestamp > index start time, will be filtered out
Timestamp: createTS + 1,
},
},
},
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
StartPosition: &msgpb.MsgPosition{
Timestamp: createTS,
},
},
},
}},
},
allocator: newMockAllocator(),
@ -1802,6 +1820,8 @@ func TestServer_DropIndex(t *testing.T) {
s := &Server{
meta: &meta{
+ catalog: catalog,
+ indexMeta: &indexMeta{
catalog: catalog,
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
@ -1877,6 +1897,9 @@ func TestServer_DropIndex(t *testing.T) {
},
},
},
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
},
segments: &SegmentsInfo{map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
@ -1888,7 +1911,6 @@ func TestServer_DropIndex(t *testing.T) {
MaxRowNum: 65536,
LastExpireTime: createTS,
},
- segmentIndexes: nil,
},
}},
},
@ -1911,14 +1933,14 @@ func TestServer_DropIndex(t *testing.T) {
mock.Anything,
mock.Anything,
).Return(errors.New("fail"))
- s.meta.catalog = catalog
+ s.meta.indexMeta.catalog = catalog
resp, err := s.DropIndex(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetErrorCode())
})
t.Run("drop one index", func(t *testing.T) {
- s.meta.catalog = catalog
+ s.meta.indexMeta.catalog = catalog
resp, err := s.DropIndex(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetErrorCode())
@ -1997,6 +2019,8 @@ func TestServer_GetIndexInfos(t *testing.T) {
s := &Server{
meta: &meta{
+ catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
+ indexMeta: &indexMeta{
catalog: &datacoord.Catalog{MetaKv: mockkv.NewMetaKv(t)},
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
@ -2016,19 +2040,8 @@ func TestServer_GetIndexInfos(t *testing.T) {
},
},
},
- segments: &SegmentsInfo{
- map[UniqueID]*SegmentInfo{
+ segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
},
segmentIndexes: map[UniqueID]*model.SegmentIndex{
indexID: {
SegmentID: segID,
CollectionID: collID,
@ -2049,6 +2062,21 @@ func TestServer_GetIndexInfos(t *testing.T) {
},
},
},
segments: &SegmentsInfo{
map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
NumOfRows: 10000,
State: commonpb.SegmentState_Flushed,
MaxRowNum: 65536,
LastExpireTime: createTS,
},
},
},
},
chunkManager: cli,
},
@ -2071,3 +2099,118 @@ func TestServer_GetIndexInfos(t *testing.T) {
assert.Equal(t, 1, len(resp.GetSegmentInfo()))
})
}
func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
m := &meta{
segments: &SegmentsInfo{
segments: map[UniqueID]*SegmentInfo{
segID: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Flushed,
},
},
segID + 1: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID + 1,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Growing,
},
},
segID + 2: {
SegmentInfo: &datapb.SegmentInfo{
ID: segID + 2,
CollectionID: collID,
PartitionID: partID,
InsertChannel: "",
NumOfRows: 1025,
State: commonpb.SegmentState_Dropped,
},
},
},
},
indexMeta: &indexMeta{
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
indexes: map[UniqueID]map[UniqueID]*model.Index{
collID: {
indexID: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID,
IndexID: indexID,
IndexName: indexName,
IsDeleted: false,
CreateTime: 0,
TypeParams: nil,
IndexParams: nil,
IsAutoIndex: false,
UserIndexParams: nil,
},
indexID + 1: {
TenantID: "",
CollectionID: collID,
FieldID: fieldID + 1,
IndexID: indexID + 1,
IndexName: indexName + "_1",
IsDeleted: false,
CreateTime: 0,
TypeParams: nil,
IndexParams: nil,
IsAutoIndex: false,
UserIndexParams: nil,
},
},
},
},
}
s := &Server{meta: m}
t.Run("normal", func(t *testing.T) {
segments := s.getUnIndexTaskSegments()
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)
m.indexMeta.segmentIndexes[segID] = make(map[UniqueID]*model.SegmentIndex)
m.indexMeta.updateSegmentIndex(&model.SegmentIndex{
CollectionID: collID,
SegmentID: segID,
IndexID: indexID + 2,
IndexState: commonpb.IndexState_Finished,
})
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)
})
t.Run("segment partial field with index", func(t *testing.T) {
m.indexMeta.updateSegmentIndex(&model.SegmentIndex{
CollectionID: collID,
SegmentID: segID,
IndexID: indexID,
IndexState: commonpb.IndexState_Finished,
})
segments := s.getUnIndexTaskSegments()
assert.Equal(t, 1, len(segments))
assert.Equal(t, segID, segments[0].ID)
})
t.Run("segment all vector field with index", func(t *testing.T) {
m.indexMeta.updateSegmentIndex(&model.SegmentIndex{
CollectionID: collID,
SegmentID: segID,
IndexID: indexID + 1,
IndexState: commonpb.IndexState_Finished,
})
segments := s.getUnIndexTaskSegments()
assert.Equal(t, 0, len(segments))
})
}

View File

@ -34,9 +34,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb" "github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb" "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/storage" "github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/segmentutil" "github.com/milvus-io/milvus/internal/util/segmentutil"
"github.com/milvus-io/milvus/pkg/common" "github.com/milvus-io/milvus/pkg/common"
@ -61,12 +59,7 @@ type meta struct {
channelCPs *typeutil.ConcurrentMap[string, *msgpb.MsgPosition] // vChannel -> channel checkpoint/see position
chunkManager storage.ChunkManager
- // collectionIndexes records which indexes are on the collection
- // collID -> indexID -> index
- indexes map[UniqueID]map[UniqueID]*model.Index
- // buildID2Meta records the meta information of the segment
- // buildID -> segmentIndex
- buildID2SegmentIndex map[UniqueID]*model.SegmentIndex
+ indexMeta *indexMeta
}
// A local cache of segment metric update. Must call commit() to take effect.
@ -87,6 +80,11 @@ type collectionInfo struct {
// NewMeta creates meta from provided `kv.TxnKV`
func newMeta(ctx context.Context, catalog metastore.DataCoordCatalog, chunkManager storage.ChunkManager) (*meta, error) {
indexMeta, err := newIndexMeta(ctx, catalog)
if err != nil {
return nil, err
}
mt := &meta{
ctx: ctx,
catalog: catalog,
@ -94,11 +92,10 @@ func newMeta(ctx context.Context, catalog metastore.DataCoordCatalog, chunkManag
segments: NewSegmentsInfo(),
channelCPLocks: lock.NewKeyLock[string](),
channelCPs: typeutil.NewConcurrentMap[string, *msgpb.MsgPosition](),
+ indexMeta: indexMeta,
chunkManager: chunkManager,
- indexes: make(map[UniqueID]map[UniqueID]*model.Index),
- buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
}
- err := mt.reloadFromKV()
+ err = mt.reloadFromKV()
if err != nil {
return nil, err
}
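This hunk is the heart of the change: index bookkeeping moves out of meta, and out from under its global RWMutex, into an indexMeta that owns its own lock, so index reads and writes no longer contend with segment operations. The struct itself is not shown in this excerpt; below is a sketch of what newIndexMeta plausibly builds, with field names taken from usages elsewhere in this commit and the dedicated lock being the point of the PR:

// Sketch only: not the verbatim definition.
type indexMeta struct {
	sync.RWMutex
	ctx     context.Context
	catalog metastore.DataCoordCatalog

	// collID -> indexID -> index
	indexes map[UniqueID]map[UniqueID]*model.Index
	// segID -> indexID -> segmentIndex
	segmentIndexes map[UniqueID]map[UniqueID]*model.SegmentIndex
	// buildID -> segmentIndex
	buildID2SegmentIndex map[UniqueID]*model.SegmentIndex
}

func newIndexMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*indexMeta, error) {
	m := &indexMeta{
		ctx:                  ctx,
		catalog:              catalog,
		indexes:              make(map[UniqueID]map[UniqueID]*model.Index),
		segmentIndexes:       make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
		buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
	}
	if err := m.reloadFromKV(); err != nil {
		return nil, err
	}
	return m, nil
}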
@ -152,25 +149,6 @@ func (m *meta) reloadFromKV() error {
pos.ChannelName = vChannel
m.channelCPs.Insert(vChannel, pos)
}
// load field indexes
fieldIndexes, err := m.catalog.ListIndexes(m.ctx)
if err != nil {
log.Error("DataCoord meta reloadFromKV load field indexes fail", zap.Error(err))
return err
}
for _, fieldIndex := range fieldIndexes {
m.updateCollectionIndex(fieldIndex)
}
segmentIndexes, err := m.catalog.ListSegmentIndexes(m.ctx)
if err != nil {
log.Error("DataCoord meta reloadFromKV load segment indexes fail", zap.Error(err))
return err
}
for _, segIdx := range segmentIndexes {
m.updateSegmentIndex(segIdx)
metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(segIdx.IndexFileKeys)))
}
log.Info("DataCoord meta reloadFromKV done", zap.Duration("duration", record.ElapseSpan())) log.Info("DataCoord meta reloadFromKV done", zap.Duration("duration", record.ElapseSpan()))
return nil return nil
} }
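The two reload loops deleted above do not disappear; they move into indexMeta's own reload, which the newIndexMeta sketch earlier would call. Roughly, under the same assumptions:

// Sketch only: the relocated reload logic, now scoped to indexMeta.
func (m *indexMeta) reloadFromKV() error {
	// Rebuild collection indexes first.
	fieldIndexes, err := m.catalog.ListIndexes(m.ctx)
	if err != nil {
		return err
	}
	for _, fieldIndex := range fieldIndexes {
		m.updateCollectionIndex(fieldIndex)
	}
	// Then rebuild per-segment index records and refresh the file metric.
	segmentIndexes, err := m.catalog.ListSegmentIndexes(m.ctx)
	if err != nil {
		return err
	}
	for _, segIdx := range segmentIndexes {
		m.updateSegmentIndex(segIdx)
		metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(segIdx.IndexFileKeys)))
	}
	return nil
}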
@ -1051,33 +1029,6 @@ func (m *meta) SelectSegments(selector SegmentInfoSelector) []*SegmentInfo {
return ret
}
func (m *meta) SelectSegmentIndexes(selector SegmentInfoSelector) map[int64]*indexStats {
m.RLock()
defer m.RUnlock()
ret := make(map[int64]*indexStats)
for _, info := range m.segments.segments {
if selector(info) {
s := &indexStats{
ID: info.GetID(),
numRows: info.GetNumOfRows(),
compactionFrom: info.GetCompactionFrom(),
indexStates: make(map[int64]*indexpb.SegmentIndexState),
state: info.GetState(),
lastExpireTime: info.GetLastExpireTime(),
}
for indexID, segIndex := range info.segmentIndexes {
s.indexStates[indexID] = &indexpb.SegmentIndexState{
SegmentID: segIndex.SegmentID,
State: segIndex.IndexState,
FailReason: segIndex.FailReason,
}
}
ret[info.GetID()] = s
}
}
return ret
}
// AddAllocation add allocation in segment
func (m *meta) AddAllocation(segmentID UniqueID, allocation *Allocation) error {
log.Debug("meta update: add allocation",

View File

@ -65,6 +65,8 @@ func (suite *MetaReloadSuite) TestReloadFromKV() {
suite.Run("ListSegments_fail", func() { suite.Run("ListSegments_fail", func() {
defer suite.resetMock() defer suite.resetMock()
suite.catalog.EXPECT().ListSegments(mock.Anything).Return(nil, errors.New("mock")) suite.catalog.EXPECT().ListSegments(mock.Anything).Return(nil, errors.New("mock"))
suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil)
suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil)
_, err := newMeta(ctx, suite.catalog, nil)
suite.Error(err)
@ -75,29 +77,8 @@ func (suite *MetaReloadSuite) TestReloadFromKV() {
suite.catalog.EXPECT().ListSegments(mock.Anything).Return([]*datapb.SegmentInfo{}, nil)
suite.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(nil, errors.New("mock"))
_, err := newMeta(ctx, suite.catalog, nil)
suite.Error(err)
})
suite.Run("ListIndexes_fail", func() {
defer suite.resetMock()
suite.catalog.EXPECT().ListSegments(mock.Anything).Return([]*datapb.SegmentInfo{}, nil)
suite.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(map[string]*msgpb.MsgPosition{}, nil)
suite.catalog.EXPECT().ListIndexes(mock.Anything).Return(nil, errors.New("mock"))
_, err := newMeta(ctx, suite.catalog, nil)
suite.Error(err)
})
suite.Run("ListSegmentIndexes_fails", func() {
defer suite.resetMock()
suite.catalog.EXPECT().ListSegments(mock.Anything).Return([]*datapb.SegmentInfo{}, nil)
suite.catalog.EXPECT().ListChannelCheckpoint(mock.Anything).Return(map[string]*msgpb.MsgPosition{}, nil)
suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil)
- suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return(nil, errors.New("mock"))
+ suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil)
_, err := newMeta(ctx, suite.catalog, nil)
suite.Error(err)
@ -105,7 +86,8 @@ func (suite *MetaReloadSuite) TestReloadFromKV() {
suite.Run("ok", func() { suite.Run("ok", func() {
defer suite.resetMock() defer suite.resetMock()
suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{}, nil)
suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{}, nil)
suite.catalog.EXPECT().ListSegments(mock.Anything).Return([]*datapb.SegmentInfo{
{
ID: 1,
@ -121,25 +103,9 @@ func (suite *MetaReloadSuite) TestReloadFromKV() {
Timestamp: 1000,
},
}, nil)
suite.catalog.EXPECT().ListIndexes(mock.Anything).Return([]*model.Index{
{
CollectionID: 1,
IndexID: 1,
IndexName: "dix",
CreateTime: 1,
},
}, nil)
- suite.catalog.EXPECT().ListSegmentIndexes(mock.Anything).Return([]*model.SegmentIndex{
- {
- SegmentID: 1,
- IndexID: 1,
- },
- }, nil)
- meta, err := newMeta(ctx, suite.catalog, nil)
+ _, err := newMeta(ctx, suite.catalog, nil)
suite.NoError(err)
- suite.NotNil(meta)
suite.MetricsEqual(metrics.DataCoordNumSegments.WithLabelValues(metrics.FlushedSegmentLabel, datapb.SegmentLevel_Legacy.String()), 1)
})

View File

@ -21,13 +21,10 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"go.uber.org/atomic" "go.uber.org/atomic"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb" "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb" "github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/datapb" "github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/pkg/log"
) )
// SegmentsInfo wraps a map, which maintains ID to SegmentInfo relation
@ -38,7 +35,6 @@ type SegmentsInfo struct {
// SegmentInfo wraps datapb.SegmentInfo and patches some extra info on it
type SegmentInfo struct {
*datapb.SegmentInfo
segmentIndexes map[UniqueID]*model.SegmentIndex
currRows int64
allocations []*Allocation
lastFlushTime time.Time
@ -55,7 +51,6 @@ type SegmentInfo struct {
func NewSegmentInfo(info *datapb.SegmentInfo) *SegmentInfo {
return &SegmentInfo{
SegmentInfo: info,
segmentIndexes: make(map[UniqueID]*model.SegmentIndex),
currRows: info.GetNumOfRows(),
allocations: make([]*Allocation, 0, 16),
lastFlushTime: time.Now().Add(-1 * flushInterval),
@ -104,30 +99,6 @@ func (s *SegmentsInfo) SetSegment(segmentID UniqueID, segment *SegmentInfo) {
s.segments[segmentID] = segment
}
// SetSegmentIndex sets SegmentIndex with segmentID, perform overwrite if already exists
func (s *SegmentsInfo) SetSegmentIndex(segmentID UniqueID, segIndex *model.SegmentIndex) {
segment, ok := s.segments[segmentID]
if !ok {
log.Warn("segment missing for set segment index",
zap.Int64("segmentID", segmentID),
zap.Int64("indexID", segIndex.IndexID),
)
return
}
segment = segment.Clone()
if segment.segmentIndexes == nil {
segment.segmentIndexes = make(map[UniqueID]*model.SegmentIndex)
}
segment.segmentIndexes[segIndex.IndexID] = segIndex
s.segments[segmentID] = segment
}
func (s *SegmentsInfo) DropSegmentIndex(segmentID UniqueID, indexID UniqueID) {
if _, ok := s.segments[segmentID]; ok {
delete(s.segments[segmentID].segmentIndexes, indexID)
}
}
// SetRowCount sets rowCount info for SegmentInfo with provided segmentID // SetRowCount sets rowCount info for SegmentInfo with provided segmentID
// if SegmentInfo not found, do nothing // if SegmentInfo not found, do nothing
func (s *SegmentsInfo) SetRowCount(segmentID UniqueID, rowCount int64) { func (s *SegmentsInfo) SetRowCount(segmentID UniqueID, rowCount int64) {
@ -213,13 +184,8 @@ func (s *SegmentsInfo) SetIsCompacting(segmentID UniqueID, isCompacting bool) {
// Clone deep clone the segment info and return a new instance // Clone deep clone the segment info and return a new instance
func (s *SegmentInfo) Clone(opts ...SegmentInfoOption) *SegmentInfo { func (s *SegmentInfo) Clone(opts ...SegmentInfoOption) *SegmentInfo {
info := proto.Clone(s.SegmentInfo).(*datapb.SegmentInfo) info := proto.Clone(s.SegmentInfo).(*datapb.SegmentInfo)
segmentIndexes := make(map[UniqueID]*model.SegmentIndex, len(s.segmentIndexes))
for indexID, segIdx := range s.segmentIndexes {
segmentIndexes[indexID] = model.CloneSegmentIndex(segIdx)
}
cloned := &SegmentInfo{ cloned := &SegmentInfo{
SegmentInfo: info, SegmentInfo: info,
segmentIndexes: segmentIndexes,
currRows: s.currRows, currRows: s.currRows,
allocations: s.allocations, allocations: s.allocations,
lastFlushTime: s.lastFlushTime, lastFlushTime: s.lastFlushTime,
@ -235,13 +201,8 @@ func (s *SegmentInfo) Clone(opts ...SegmentInfoOption) *SegmentInfo {
// ShadowClone shadow clone the segment and return a new instance // ShadowClone shadow clone the segment and return a new instance
func (s *SegmentInfo) ShadowClone(opts ...SegmentInfoOption) *SegmentInfo { func (s *SegmentInfo) ShadowClone(opts ...SegmentInfoOption) *SegmentInfo {
segmentIndexes := make(map[UniqueID]*model.SegmentIndex, len(s.segmentIndexes))
for indexID, segIdx := range s.segmentIndexes {
segmentIndexes[indexID] = model.CloneSegmentIndex(segIdx)
}
cloned := &SegmentInfo{ cloned := &SegmentInfo{
SegmentInfo: s.SegmentInfo, SegmentInfo: s.SegmentInfo,
segmentIndexes: segmentIndexes,
currRows: s.currRows, currRows: s.currRows,
allocations: s.allocations, allocations: s.allocations,
lastFlushTime: s.lastFlushTime, lastFlushTime: s.lastFlushTime,
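With index state moved out to indexMeta, the only difference left between the two constructors is whether the embedded protobuf is copied. A tiny self-contained illustration of that deep-versus-shadow distinction (stand-in types for illustration only, not Milvus code):

package main

import "fmt"

// payload stands in for the wrapped protobuf message.
type payload struct{ rows int64 }

type segment struct {
	p        *payload
	currRows int64
}

// clone deep-copies the payload, so mutations through the clone
// cannot be observed through the original.
func (s *segment) clone() *segment {
	cp := *s.p
	return &segment{p: &cp, currRows: s.currRows}
}

// shadowClone shares the payload pointer and copies only the
// wrapper fields, which is cheaper but aliases the message.
func (s *segment) shadowClone() *segment {
	return &segment{p: s.p, currRows: s.currRows}
}

func main() {
	orig := &segment{p: &payload{rows: 100}, currRows: 100}
	deep, shallow := orig.clone(), orig.shadowClone()
	deep.p.rows = 1    // not visible through orig
	shallow.p.rows = 2 // visible through orig
	fmt.Println(orig.p.rows) // prints 2
}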

View File

@@ -26,7 +26,7 @@ import (
 	"syscall"
 	"time"
-	semver "github.com/blang/semver/v4"
+	"github.com/blang/semver/v4"
 	"github.com/cockroachdb/errors"
 	"github.com/samber/lo"
 	"github.com/tikv/client-go/v2/txnkv"

View File

@@ -1452,7 +1452,7 @@ func TestGetQueryVChanPositions(t *testing.T) {
 		},
 	})
-	err := svr.meta.CreateIndex(&model.Index{
+	err := svr.meta.indexMeta.CreateIndex(&model.Index{
 		TenantID:     "",
 		CollectionID: 0,
 		FieldID:      2,
@@ -1476,13 +1476,13 @@ func TestGetQueryVChanPositions(t *testing.T) {
 	}
 	err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(s1))
 	assert.NoError(t, err)
-	err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+	err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 		SegmentID: 1,
 		BuildID:   1,
 		IndexID:   1,
 	})
 	assert.NoError(t, err)
-	err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+	err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 		BuildID: 1,
 		State:   commonpb.IndexState_Finished,
 	})
@@ -1619,7 +1619,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
 		ID:     0,
 		Schema: schema,
 	})
-	err := svr.meta.CreateIndex(&model.Index{
+	err := svr.meta.indexMeta.CreateIndex(&model.Index{
 		TenantID:     "",
 		CollectionID: 0,
 		FieldID:      2,
@@ -1689,7 +1689,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
 		ID:     0,
 		Schema: schema,
 	})
-	err := svr.meta.CreateIndex(&model.Index{
+	err := svr.meta.indexMeta.CreateIndex(&model.Index{
 		TenantID:     "",
 		CollectionID: 0,
 		FieldID:      2,
@@ -1775,7 +1775,7 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
 		ID:     0,
 		Schema: schema,
 	})
-	err := svr.meta.CreateIndex(&model.Index{
+	err := svr.meta.indexMeta.CreateIndex(&model.Index{
 		TenantID:     "",
 		CollectionID: 0,
 		FieldID:      2,
@@ -1813,13 +1813,13 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
 	}
 	err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(d))
 	assert.NoError(t, err)
-	err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+	err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 		SegmentID: 2,
 		BuildID:   1,
 		IndexID:   1,
 	})
 	assert.NoError(t, err)
-	err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+	err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 		BuildID: 1,
 		State:   commonpb.IndexState_Finished,
 	})
@@ -1841,13 +1841,13 @@ func TestGetQueryVChanPositions_Retrieve_unIndexed(t *testing.T) {
 	}
 	err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(e))
 	assert.NoError(t, err)
-	err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+	err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 		SegmentID: 3,
 		BuildID:   2,
 		IndexID:   1,
 	})
 	assert.NoError(t, err)
-	err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+	err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 		BuildID: 2,
 		State:   commonpb.IndexState_Finished,
 	})
@@ -1976,7 +1976,7 @@ func TestGetRecoveryInfo(t *testing.T) {
 		})
 		assert.NoError(t, err)
-		err = svr.meta.CreateIndex(&model.Index{
+		err = svr.meta.indexMeta.CreateIndex(&model.Index{
 			TenantID:     "",
 			CollectionID: 0,
 			FieldID:      2,
@@ -2025,22 +2025,22 @@ func TestGetRecoveryInfo(t *testing.T) {
 		assert.NoError(t, err)
 		err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg2))
 		assert.NoError(t, err)
-		err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+		err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 			SegmentID: seg1.ID,
 			BuildID:   seg1.ID,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+		err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 			BuildID: seg1.ID,
 			State:   commonpb.IndexState_Finished,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+		err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 			SegmentID: seg2.ID,
 			BuildID:   seg2.ID,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+		err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 			BuildID: seg2.ID,
 			State:   commonpb.IndexState_Finished,
 		})
@@ -2195,7 +2195,7 @@ func TestGetRecoveryInfo(t *testing.T) {
 		err := svr.meta.AddSegment(context.TODO(), NewSegmentInfo(segment))
 		assert.NoError(t, err)
-		err = svr.meta.CreateIndex(&model.Index{
+		err = svr.meta.indexMeta.CreateIndex(&model.Index{
 			TenantID:     "",
 			CollectionID: 0,
 			FieldID:      2,
@@ -2203,12 +2203,12 @@ func TestGetRecoveryInfo(t *testing.T) {
 			IndexName:    "",
 		})
 		assert.NoError(t, err)
-		err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+		err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 			SegmentID: segment.ID,
 			BuildID:   segment.ID,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+		err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 			BuildID: segment.ID,
 			State:   commonpb.IndexState_Finished,
 		})
@@ -2361,7 +2361,7 @@ func TestGetRecoveryInfo(t *testing.T) {
 		assert.NoError(t, err)
 		err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg5))
 		assert.NoError(t, err)
-		err = svr.meta.CreateIndex(&model.Index{
+		err = svr.meta.indexMeta.CreateIndex(&model.Index{
 			TenantID:     "",
 			CollectionID: 0,
 			FieldID:      2,
@@ -2375,7 +2375,7 @@ func TestGetRecoveryInfo(t *testing.T) {
 			UserIndexParams: nil,
 		})
 		assert.NoError(t, err)
-		svr.meta.segments.SetSegmentIndex(seg4.ID, &model.SegmentIndex{
+		svr.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
 			SegmentID:    seg4.ID,
 			CollectionID: 0,
 			PartitionID:  0,
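The test rewrites above are mechanical: every index mutation that used to go through the global meta lock (CreateIndex, AddSegmentIndex, FinishTask) now goes through svr.meta.indexMeta, which serializes on its own lock. Pulled together, the sequence a test drives to mark a segment as indexed reads as below; a sketch with illustrative IDs, where markSegmentIndexed itself is not part of the commit:

// markSegmentIndexed registers an index, records a build for the
// segment, and finishes the build task, all via the independent
// index meta. Hypothetical helper; IDs are illustrative.
func markSegmentIndexed(m *meta, collID, fieldID, indexID, segID, buildID int64) error {
	if err := m.indexMeta.CreateIndex(&model.Index{
		CollectionID: collID,
		FieldID:      fieldID,
		IndexID:      indexID,
		IndexName:    "_default_idx",
	}); err != nil {
		return err
	}
	if err := m.indexMeta.AddSegmentIndex(&model.SegmentIndex{
		SegmentID: segID,
		BuildID:   buildID,
		IndexID:   indexID,
	}); err != nil {
		return err
	}
	return m.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
		BuildID: buildID,
		State:   commonpb.IndexState_Finished,
	})
}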

View File

@@ -894,7 +894,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
 		})
 		assert.NoError(t, err)
-		err = svr.meta.CreateIndex(&model.Index{
+		err = svr.meta.indexMeta.CreateIndex(&model.Index{
 			TenantID:     "",
 			CollectionID: 0,
 			FieldID:      2,
@@ -943,22 +943,22 @@ func TestGetRecoveryInfoV2(t *testing.T) {
 		assert.NoError(t, err)
 		err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg2))
 		assert.NoError(t, err)
-		err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+		err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 			SegmentID: seg1.ID,
 			BuildID:   seg1.ID,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+		err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 			BuildID: seg1.ID,
 			State:   commonpb.IndexState_Finished,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+		err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 			SegmentID: seg2.ID,
 			BuildID:   seg2.ID,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+		err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 			BuildID: seg2.ID,
 			State:   commonpb.IndexState_Finished,
 		})
@@ -1118,7 +1118,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
 		err := svr.meta.AddSegment(context.TODO(), NewSegmentInfo(segment))
 		assert.NoError(t, err)
-		err = svr.meta.CreateIndex(&model.Index{
+		err = svr.meta.indexMeta.CreateIndex(&model.Index{
 			TenantID:     "",
 			CollectionID: 0,
 			FieldID:      2,
@@ -1126,12 +1126,12 @@ func TestGetRecoveryInfoV2(t *testing.T) {
 			IndexName:    "",
 		})
 		assert.NoError(t, err)
-		err = svr.meta.AddSegmentIndex(&model.SegmentIndex{
+		err = svr.meta.indexMeta.AddSegmentIndex(&model.SegmentIndex{
 			SegmentID: segment.ID,
 			BuildID:   segment.ID,
 		})
 		assert.NoError(t, err)
-		err = svr.meta.FinishTask(&indexpb.IndexTaskInfo{
+		err = svr.meta.indexMeta.FinishTask(&indexpb.IndexTaskInfo{
 			BuildID: segment.ID,
 			State:   commonpb.IndexState_Finished,
 		})
@@ -1284,7 +1284,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
 		assert.NoError(t, err)
 		err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg5))
 		assert.NoError(t, err)
-		err = svr.meta.CreateIndex(&model.Index{
+		err = svr.meta.indexMeta.CreateIndex(&model.Index{
 			TenantID:     "",
 			CollectionID: 0,
 			FieldID:      2,
@@ -1298,7 +1298,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
 			UserIndexParams: nil,
 		})
 		assert.NoError(t, err)
-		svr.meta.segments.SetSegmentIndex(seg4.ID, &model.SegmentIndex{
+		svr.meta.indexMeta.updateSegmentIndex(&model.SegmentIndex{
 			SegmentID:    seg4.ID,
 			CollectionID: 0,
 			PartitionID:  0,

View File

@@ -102,7 +102,7 @@ func FilterInIndexedSegments(handler Handler, mt *meta, segments ...*SegmentInfo
 			hasUnindexedVecField := false
 			for _, fieldID := range vecFieldIDs[segment.GetCollectionID()] {
-				segmentIndexState := mt.GetSegmentIndexStateOnField(segment.GetCollectionID(), segment.GetID(), fieldID)
+				segmentIndexState := mt.indexMeta.GetSegmentIndexStateOnField(segment.GetCollectionID(), segment.GetID(), fieldID)
 				if segmentIndexState.State != commonpb.IndexState_Finished {
 					hasUnindexedVecField = true
 				}
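The one-line change above is the whole filter: a segment counts as indexed only when every vector field reports a finished index from the independent index meta. The predicate, extracted for readability (a sketch; isFullyIndexed is a hypothetical helper, not in this commit):

// isFullyIndexed reports whether every vector field of the segment
// has a finished index according to the independent index meta.
func isFullyIndexed(mt *meta, segment *SegmentInfo, vecFieldIDs []int64) bool {
	for _, fieldID := range vecFieldIDs {
		state := mt.indexMeta.GetSegmentIndexStateOnField(segment.GetCollectionID(), segment.GetID(), fieldID)
		if state.State != commonpb.IndexState_Finished {
			return false
		}
	}
	return true
}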

View File

@@ -82,11 +82,33 @@ func (s *DataNodeSuite) compactAndReboot(collection string) {
 		CollectionName: collection,
 		FieldName:      integration.FloatVecField,
 		IndexName:      "_default",
-		ExtraParams:    integration.ConstructIndexParam(s.dim, "FLAT", metric.IP),
+		ExtraParams:    integration.ConstructIndexParam(s.dim, integration.IndexHNSW, metric.IP),
 	})
 	s.Require().NoError(err)
 	s.Require().True(merr.Ok(createIndexStatus))
+	for stay, timeout := true, time.After(time.Second*10); stay; {
+		select {
+		case <-timeout:
+			stay = false
+		default:
+			describeIndexResp, err := s.Cluster.Proxy.DescribeIndex(ctx, &milvuspb.DescribeIndexRequest{
+				CollectionName: collection,
+				FieldName:      integration.FloatVecField,
+				IndexName:      "_default",
+			})
+			s.Require().NoError(err)
+			for _, d := range describeIndexResp.GetIndexDescriptions() {
+				if d.GetFieldName() == integration.FloatVecField && d.GetState() == commonpb.IndexState_Finished {
+					log.Info("build index finished", zap.Any("index_desc", d))
+					stay = false
+				}
+			}
+			time.Sleep(1 * time.Second)
+		}
+	}
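The wait loop added above polls DescribeIndex until the vector field's index reports Finished, so the compaction that follows does not race the index build. The same polling could be lifted into a reusable suite method, sketched here under the same assumptions (waitForIndex is a hypothetical name, not part of the commit; it belongs in the same test file so the imports above apply):

// waitForIndex polls DescribeIndex until the field's index is
// Finished, or gives up after the deadline and returns false.
func (s *DataNodeSuite) waitForIndex(ctx context.Context, collection, field string) bool {
	deadline := time.After(10 * time.Second)
	for {
		select {
		case <-deadline:
			return false
		default:
			resp, err := s.Cluster.Proxy.DescribeIndex(ctx, &milvuspb.DescribeIndexRequest{
				CollectionName: collection,
				FieldName:      field,
				IndexName:      "_default",
			})
			s.Require().NoError(err)
			for _, d := range resp.GetIndexDescriptions() {
				if d.GetFieldName() == field && d.GetState() == commonpb.IndexState_Finished {
					return true
				}
			}
			time.Sleep(time.Second)
		}
	}
}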
 	coll, err := s.Cluster.Proxy.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
 		CollectionName: collection,
 	})
@@ -112,7 +134,7 @@ func (s *DataNodeSuite) compactAndReboot(collection string) {
 	s.Require().True(merr.Ok(stateResp.GetStatus()))
 	// sleep to ensure compaction tasks are submitted to DN
-	time.Sleep(time.Second)
+	time.Sleep(3 * time.Second)
 	planResp, err := s.Cluster.Proxy.GetCompactionStateWithPlans(ctx, &milvuspb.GetCompactionPlansRequest{
 		CompactionID: compactID,