enhance: Refine task meta with key lock (#40613)

issue: #39101
2.5 pr: #40146 #40353

Signed-off-by: Cai Zhang <cai.zhang@zilliz.com>

parent bf4fc6a8c6
commit 6dbe5d475e
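This change moves indexMeta away from a single coarse mutex: collection-level index definitions are guarded by a dedicated fieldIndexLock, per-segment index state lives in a typeutil.ConcurrentMap, and per-task mutations are serialized by a lock.KeyLock keyed by build ID. The Go sketch below illustrates that locking pattern in isolation; it is a minimal stand-alone example, not Milvus code — perKeyLock and taskMeta are made-up names, and sync.Map plus a homemade per-key mutex stand in for typeutil.ConcurrentMap and lock.KeyLock.

package main

import (
    "fmt"
    "sync"
)

// perKeyLock hands out one mutex per key so that updates to different
// build IDs do not serialize on a single global lock.
// (Simplified stand-in for lock.KeyLock; it never frees idle entries.)
type perKeyLock struct {
    mu    sync.Mutex
    locks map[int64]*sync.Mutex
}

func newPerKeyLock() *perKeyLock {
    return &perKeyLock{locks: make(map[int64]*sync.Mutex)}
}

// Lock returns the mutex for key, creating it on first use, already locked.
func (k *perKeyLock) Lock(key int64) *sync.Mutex {
    k.mu.Lock()
    m, ok := k.locks[key]
    if !ok {
        m = &sync.Mutex{}
        k.locks[key] = m
    }
    k.mu.Unlock()
    m.Lock()
    return m
}

// taskMeta mirrors the shape of the refactor: a concurrent map for
// per-segment state plus a per-buildID lock for task mutations.
type taskMeta struct {
    keyLock        *perKeyLock
    segmentIndexes sync.Map // segmentID -> state; stand-in for typeutil.ConcurrentMap
}

// finishTask updates one task; only writers of the same buildID contend.
func (t *taskMeta) finishTask(buildID, segmentID int64) {
    l := t.keyLock.Lock(buildID)
    defer l.Unlock()
    t.segmentIndexes.Store(segmentID, "Finished")
}

func main() {
    meta := &taskMeta{keyLock: newPerKeyLock()}
    var wg sync.WaitGroup
    for i := int64(1); i <= 4; i++ {
        wg.Add(1)
        go func(id int64) {
            defer wg.Done()
            meta.finishTask(id, id) // distinct build IDs run in parallel
        }(i)
    }
    wg.Wait()
    meta.segmentIndexes.Range(func(k, v any) bool {
        fmt.Println("segment", k, "state", v)
        return true
    })
}

The point of the pattern is that two tasks with different build IDs never contend on the same mutex, while reads of segment state go through the concurrent map without any global lock.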
@ -169,7 +169,7 @@ func Test_compactionTrigger_force_without_index(t *testing.T) {
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{},
|
||||
},
|
||||
collections: map[int64]*collectionInfo{
|
||||
@ -308,6 +308,109 @@ func Test_compactionTrigger_force(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
1000: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 1000,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "DISKANN",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx3 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx3.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
CollectionID: 1111,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
im.segmentIndexes.Insert(1, segIdx1)
|
||||
im.segmentIndexes.Insert(2, segIdx2)
|
||||
im.segmentIndexes.Insert(3, segIdx3)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
@ -340,109 +443,7 @@ func Test_compactionTrigger_force(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: {
|
||||
indexID: {
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
2: {
|
||||
indexID: {
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
3: {
|
||||
indexID: {
|
||||
SegmentID: 3,
|
||||
CollectionID: 1111,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
1000: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 1000,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "DISKANN",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -1200,21 +1201,53 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
|
||||
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
}
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, genSegIndex(1, indexID, 20))
|
||||
im.segmentIndexes.Insert(2, genSegIndex(2, indexID, 20))
|
||||
im.segmentIndexes.Insert(3, genSegIndex(3, indexID, 20))
|
||||
im.segmentIndexes.Insert(4, genSegIndex(4, indexID, 20))
|
||||
im.segmentIndexes.Insert(5, genSegIndex(5, indexID, 20))
|
||||
im.segmentIndexes.Insert(6, genSegIndex(6, indexID, 20))
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
@ -1228,39 +1261,8 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
|
||||
// 8 small segments
|
||||
channelCPs: newChannelCps(),
|
||||
|
||||
segments: mockSegmentsInfo(20, 20, 20, 20, 20, 20),
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: genSegIndex(1, indexID, 20),
|
||||
2: genSegIndex(2, indexID, 20),
|
||||
3: genSegIndex(3, indexID, 20),
|
||||
4: genSegIndex(4, indexID, 20),
|
||||
5: genSegIndex(5, indexID, 20),
|
||||
6: genSegIndex(6, indexID, 20),
|
||||
},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
segments: mockSegmentsInfo(20, 20, 20, 20, 20, 20),
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -1341,21 +1343,53 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
|
||||
vecFieldID := int64(201)
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
}
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, genSegIndex(1, indexID, 20))
|
||||
im.segmentIndexes.Insert(2, genSegIndex(2, indexID, 20))
|
||||
im.segmentIndexes.Insert(3, genSegIndex(3, indexID, 20))
|
||||
im.segmentIndexes.Insert(4, genSegIndex(4, indexID, 20))
|
||||
im.segmentIndexes.Insert(5, genSegIndex(5, indexID, 20))
|
||||
im.segmentIndexes.Insert(6, genSegIndex(6, indexID, 20))
|
||||
im.segmentIndexes.Insert(7, genSegIndex(7, indexID, 20))
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
@ -1370,40 +1404,8 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
|
||||
channelCPs: newChannelCps(),
|
||||
// 7 segments with 200MB each, the compaction is expected to be triggered
|
||||
// as the first 5 being merged, and 1 plus being squeezed.
|
||||
segments: mockSegmentsInfo(200, 200, 200, 200, 200, 200, 200),
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: genSegIndex(1, indexID, 20),
|
||||
2: genSegIndex(2, indexID, 20),
|
||||
3: genSegIndex(3, indexID, 20),
|
||||
4: genSegIndex(4, indexID, 20),
|
||||
5: genSegIndex(5, indexID, 20),
|
||||
6: genSegIndex(6, indexID, 20),
|
||||
7: genSegIndex(7, indexID, 20),
|
||||
},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
segments: mockSegmentsInfo(200, 200, 200, 200, 200, 200, 200),
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -1484,21 +1486,52 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
|
||||
}
|
||||
vecFieldID := int64(201)
|
||||
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
}
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, genSegIndex(1, indexID, 20))
|
||||
im.segmentIndexes.Insert(2, genSegIndex(2, indexID, 20))
|
||||
im.segmentIndexes.Insert(3, genSegIndex(3, indexID, 20))
|
||||
im.segmentIndexes.Insert(4, genSegIndex(4, indexID, 20))
|
||||
im.segmentIndexes.Insert(5, genSegIndex(5, indexID, 20))
|
||||
im.segmentIndexes.Insert(6, genSegIndex(6, indexID, 20))
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
@ -1513,39 +1546,8 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
|
||||
&meta{
|
||||
channelCPs: newChannelCps(),
|
||||
|
||||
segments: mockSegmentsInfo(600, 600, 600, 600, 260, 260),
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: genSegIndex(1, indexID, 20),
|
||||
2: genSegIndex(2, indexID, 20),
|
||||
3: genSegIndex(3, indexID, 20),
|
||||
4: genSegIndex(4, indexID, 20),
|
||||
5: genSegIndex(5, indexID, 20),
|
||||
6: genSegIndex(6, indexID, 20),
|
||||
},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: 2,
|
||||
FieldID: vecFieldID,
|
||||
IndexID: indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
segments: mockSegmentsInfo(600, 600, 600, 600, 260, 260),
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -2191,20 +2193,20 @@ func (s *CompactionTriggerSuite) genSeg(segID, numRows int64) *datapb.SegmentInf
|
||||
}
|
||||
}
|
||||
|
||||
func (s *CompactionTriggerSuite) genSegIndex(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: s.collectionID,
|
||||
PartitionID: s.partitionID,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
}
|
||||
func (s *CompactionTriggerSuite) genSegIndex(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: s.collectionID,
|
||||
PartitionID: s.partitionID,
|
||||
NumRows: numRows,
|
||||
IndexID: indexID,
|
||||
BuildID: segID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
|
||||
func (s *CompactionTriggerSuite) SetupTest() {
|
||||
@ -2241,6 +2243,37 @@ func (s *CompactionTriggerSuite) SetupTest() {
|
||||
lastFlushTime: time.Now(),
|
||||
}
|
||||
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
s.collectionID: {
|
||||
s.indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: s.collectionID,
|
||||
FieldID: s.vecFieldID,
|
||||
IndexID: s.indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, s.genSegIndex(1, indexID, 60))
|
||||
im.segmentIndexes.Insert(2, s.genSegIndex(2, indexID, 60))
|
||||
im.segmentIndexes.Insert(3, s.genSegIndex(3, indexID, 60))
|
||||
im.segmentIndexes.Insert(4, s.genSegIndex(4, indexID, 60))
|
||||
im.segmentIndexes.Insert(5, s.genSegIndex(5, indexID, 60))
|
||||
im.segmentIndexes.Insert(6, s.genSegIndex(6, indexID, 60))
|
||||
s.meta = &meta{
|
||||
channelCPs: newChannelCps(),
|
||||
catalog: catalog,
|
||||
@ -2276,38 +2309,7 @@ func (s *CompactionTriggerSuite) SetupTest() {
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: s.genSegIndex(1, indexID, 60),
|
||||
2: s.genSegIndex(2, indexID, 60),
|
||||
3: s.genSegIndex(3, indexID, 60),
|
||||
4: s.genSegIndex(4, indexID, 60),
|
||||
5: s.genSegIndex(5, indexID, 26),
|
||||
6: s.genSegIndex(6, indexID, 26),
|
||||
},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
s.collectionID: {
|
||||
s.indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: s.collectionID,
|
||||
FieldID: s.vecFieldID,
|
||||
IndexID: s.indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
s.collectionID: {
|
||||
ID: s.collectionID,
|
||||
@ -2731,6 +2733,45 @@ func Test_compactionTrigger_generatePlans(t *testing.T) {
|
||||
compactTime *compactTime
|
||||
expectedSize int64
|
||||
}
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(1, segIdx0)
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(2, segIdx1)
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
@ -2758,46 +2799,7 @@ func Test_compactionTrigger_generatePlans(t *testing.T) {
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: {
|
||||
indexID: {
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
2: {
|
||||
indexID: {
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
|
||||
@ -54,6 +54,7 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/lock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
func Test_garbageCollector_basic(t *testing.T) {
|
||||
@ -476,6 +477,45 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
|
||||
},
|
||||
},
|
||||
}
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
meta := &meta{
|
||||
RWMutex: lock.RWMutex{},
|
||||
ctx: ctx,
|
||||
@ -483,47 +523,8 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
|
||||
collections: nil,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
catalog: catalog,
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{},
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
},
|
||||
@ -641,6 +642,45 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
|
||||
},
|
||||
},
|
||||
}
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
meta := &meta{
|
||||
RWMutex: lock.RWMutex{},
|
||||
ctx: ctx,
|
||||
@ -648,47 +688,8 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
|
||||
collections: nil,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
catalog: catalog,
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -1043,52 +1044,53 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 5000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 1024,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 5000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: []string{"file3", "file4"},
|
||||
IndexSerializedSize: 1024,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
m := &meta{
|
||||
catalog: catalog,
|
||||
channelCPs: channelCPs,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 5000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 1024,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 5000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: []string{"file3", "file4"},
|
||||
IndexSerializedSize: 1024,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
catalog: catalog,
|
||||
segmentIndexes: segIndexes,
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
    "github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
    "github.com/milvus-io/milvus/pkg/v2/util/funcutil"
    "github.com/milvus-io/milvus/pkg/v2/util/indexparams"
    "github.com/milvus-io/milvus/pkg/v2/util/lock"
    "github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
    "github.com/milvus-io/milvus/pkg/v2/util/paramtable"
    "github.com/milvus-io/milvus/pkg/v2/util/timerecord"
@@ -52,19 +53,21 @@ import (
)

type indexMeta struct {
    sync.RWMutex
    ctx     context.Context
    catalog metastore.DataCoordCatalog

    // collectionIndexes records which indexes are on the collection
    // collID -> indexID -> index
    indexes map[UniqueID]map[UniqueID]*model.Index
    fieldIndexLock sync.RWMutex
    indexes        map[UniqueID]map[UniqueID]*model.Index

    // buildID2Meta records building index meta information of the segment
    segmentBuildInfo *segmentBuildInfo

    // buildID -> lock
    keyLock *lock.KeyLock[UniqueID]
    // segmentID -> indexID -> segmentIndex
    segmentIndexes map[UniqueID]map[UniqueID]*model.SegmentIndex
    segmentIndexes *typeutil.ConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]
}

func newIndexTaskStats(s *model.SegmentIndex) *metricsinfo.IndexTaskStats {
@@ -86,7 +89,7 @@ func newIndexTaskStats(s *model.SegmentIndex) *metricsinfo.IndexTaskStats {
type segmentBuildInfo struct {
    // buildID2Meta records the meta information of the segment
    // buildID -> segmentIndex
    buildID2SegmentIndex map[UniqueID]*model.SegmentIndex
    buildID2SegmentIndex *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]
    // taskStats records the task stats of the segment
    taskStats *expirable.LRU[UniqueID, *metricsinfo.IndexTaskStats]
}
@@ -94,28 +97,28 @@ type segmentBuildInfo struct {
func newSegmentIndexBuildInfo() *segmentBuildInfo {
    return &segmentBuildInfo{
        // build ID -> segment index
        buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
        buildID2SegmentIndex: typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex](),
        // build ID -> task stats
        taskStats: expirable.NewLRU[UniqueID, *metricsinfo.IndexTaskStats](1024, nil, time.Minute*30),
    }
}

func (m *segmentBuildInfo) Add(segIdx *model.SegmentIndex) {
    m.buildID2SegmentIndex[segIdx.BuildID] = segIdx
    m.buildID2SegmentIndex.Insert(segIdx.BuildID, segIdx)
    m.taskStats.Add(segIdx.BuildID, newIndexTaskStats(segIdx))
}

func (m *segmentBuildInfo) Get(key UniqueID) (*model.SegmentIndex, bool) {
    value, exists := m.buildID2SegmentIndex[key]
    value, exists := m.buildID2SegmentIndex.Get(key)
    return value, exists
}

func (m *segmentBuildInfo) Remove(key UniqueID) {
    delete(m.buildID2SegmentIndex, key)
    m.buildID2SegmentIndex.Remove(key)
}

func (m *segmentBuildInfo) List() map[UniqueID]*model.SegmentIndex {
    return m.buildID2SegmentIndex
func (m *segmentBuildInfo) List() []*model.SegmentIndex {
    return m.buildID2SegmentIndex.Values()
}

func (m *segmentBuildInfo) GetTaskStats() []*metricsinfo.IndexTaskStats {
@@ -128,8 +131,9 @@ func newIndexMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*ind
        ctx:     ctx,
        catalog: catalog,
        indexes: make(map[UniqueID]map[UniqueID]*model.Index),
        keyLock:          lock.NewKeyLock[UniqueID](),
        segmentBuildInfo: newSegmentIndexBuildInfo(),
        segmentIndexes: make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
        segmentIndexes:   typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
    }
    err := mt.reloadFromKV()
    if err != nil {
@@ -174,12 +178,14 @@ func (m *indexMeta) updateCollectionIndex(index *model.Index) {
}

func (m *indexMeta) updateSegmentIndex(segIdx *model.SegmentIndex) {
    indexes, ok := m.segmentIndexes[segIdx.SegmentID]
    indexes, ok := m.segmentIndexes.Get(segIdx.SegmentID)
    if ok {
        indexes[segIdx.IndexID] = segIdx
        indexes.Insert(segIdx.IndexID, segIdx)
        m.segmentIndexes.Insert(segIdx.SegmentID, indexes)
    } else {
        m.segmentIndexes[segIdx.SegmentID] = make(map[UniqueID]*model.SegmentIndex)
        m.segmentIndexes[segIdx.SegmentID][segIdx.IndexID] = segIdx
        indexes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
        indexes.Insert(segIdx.IndexID, segIdx)
        m.segmentIndexes.Insert(segIdx.SegmentID, indexes)
    }
    m.segmentBuildInfo.Add(segIdx)
}
@@ -206,36 +212,37 @@ func (m *indexMeta) updateSegIndexMeta(segIdx *model.SegmentIndex, updateFunc fu
}

func (m *indexMeta) updateIndexTasksMetrics() {
    m.RLock()
    defer m.RUnlock()

    taskMetrics := make(map[UniqueID]map[commonpb.IndexState]int)
    taskMetrics := make(map[indexpb.JobState]int)
    taskMetrics[indexpb.JobState_JobStateNone] = 0
    taskMetrics[indexpb.JobState_JobStateInit] = 0
    taskMetrics[indexpb.JobState_JobStateInProgress] = 0
    taskMetrics[indexpb.JobState_JobStateFinished] = 0
    taskMetrics[indexpb.JobState_JobStateFailed] = 0
    taskMetrics[indexpb.JobState_JobStateRetry] = 0
    for _, segIdx := range m.segmentBuildInfo.List() {
        if segIdx.IsDeleted || !m.isIndexExist(segIdx.CollectionID, segIdx.IndexID) {
        if segIdx.IsDeleted || !m.IsIndexExist(segIdx.CollectionID, segIdx.IndexID) {
            continue
        }
        if _, ok := taskMetrics[segIdx.CollectionID]; !ok {
            taskMetrics[segIdx.CollectionID] = make(map[commonpb.IndexState]int)
            taskMetrics[segIdx.CollectionID][commonpb.IndexState_Unissued] = 0
            taskMetrics[segIdx.CollectionID][commonpb.IndexState_InProgress] = 0
            taskMetrics[segIdx.CollectionID][commonpb.IndexState_Finished] = 0
            taskMetrics[segIdx.CollectionID][commonpb.IndexState_Failed] = 0

        switch segIdx.IndexState {
        case commonpb.IndexState_IndexStateNone:
            taskMetrics[indexpb.JobState_JobStateNone]++
        case commonpb.IndexState_Unissued:
            taskMetrics[indexpb.JobState_JobStateInit]++
        case commonpb.IndexState_InProgress:
            taskMetrics[indexpb.JobState_JobStateInProgress]++
        case commonpb.IndexState_Finished:
            taskMetrics[indexpb.JobState_JobStateFinished]++
        case commonpb.IndexState_Failed:
            taskMetrics[indexpb.JobState_JobStateFailed]++
        case commonpb.IndexState_Retry:
            taskMetrics[indexpb.JobState_JobStateRetry]++
        }
        taskMetrics[segIdx.CollectionID][segIdx.IndexState]++
    }
    for collID, m := range taskMetrics {
        for k, v := range m {
            switch k {
            case commonpb.IndexState_Unissued:
                metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.UnissuedIndexTaskLabel).Set(float64(v))
            case commonpb.IndexState_InProgress:
                metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.InProgressIndexTaskLabel).Set(float64(v))
            case commonpb.IndexState_Finished:
                metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.FinishedIndexTaskLabel).Set(float64(v))
            case commonpb.IndexState_Failed:
                metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.FailedIndexTaskLabel).Set(float64(v))
            }
        }

    jobType := indexpb.JobType_JobTypeIndexJob.String()
    for k, v := range taskMetrics {
        metrics.TaskNum.WithLabelValues(jobType, k.String()).Set(float64(v))
    }
    log.Ctx(m.ctx).Info("update index metric", zap.Int("collectionNum", len(taskMetrics)))
}
@@ -345,9 +352,10 @@ func checkParams(fieldIndex *model.Index, req *indexpb.CreateIndexRequest) bool
    return !notEq
}

// CanCreateIndex currently is used in Unittest
func (m *indexMeta) CanCreateIndex(req *indexpb.CreateIndexRequest, isJson bool) (UniqueID, error) {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    indexes, ok := m.indexes[req.CollectionID]
    if !ok {
@@ -395,8 +403,8 @@ func (m *indexMeta) CanCreateIndex(req *indexpb.CreateIndexRequest, isJson bool)

// HasSameReq determine whether there are same indexing tasks.
func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    for _, fieldIndex := range m.indexes[req.CollectionID] {
        if fieldIndex.IsDeleted {
@@ -420,8 +428,8 @@ func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID)
func (m *indexMeta) CreateIndex(ctx context.Context, index *model.Index) error {
    log.Ctx(ctx).Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID),
        zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName))
    m.Lock()
    defer m.Unlock()
    m.fieldIndexLock.Lock()
    defer m.fieldIndexLock.Unlock()

    if err := m.catalog.CreateIndex(ctx, index); err != nil {
        log.Ctx(ctx).Error("meta update: CreateIndex save meta fail", zap.Int64("collectionID", index.CollectionID),
@@ -437,8 +445,8 @@ func (m *indexMeta) CreateIndex(ctx context.Context, index *model.Index) error {
}

func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) error {
    m.Lock()
    defer m.Unlock()
    m.fieldIndexLock.Lock()
    defer m.fieldIndexLock.Unlock()

    err := m.catalog.AlterIndexes(ctx, indexes)
    if err != nil {
@@ -454,10 +462,11 @@ func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) err

// AddSegmentIndex adds the index meta corresponding the indexBuildID to meta table.
func (m *indexMeta) AddSegmentIndex(ctx context.Context, segIndex *model.SegmentIndex) error {
    m.Lock()
    defer m.Unlock()

    buildID := segIndex.BuildID

    m.keyLock.Lock(buildID)
    defer m.keyLock.Unlock(buildID)

    log.Ctx(ctx).Info("meta update: adding segment index", zap.Int64("collectionID", segIndex.CollectionID),
        zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID),
        zap.Int64("buildID", buildID))
@@ -477,8 +486,9 @@ func (m *indexMeta) AddSegmentIndex(ctx context.Context, segIndex *model.Segment
}

func (m *indexMeta) GetIndexIDByName(collID int64, indexName string) map[int64]uint64 {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    indexID2CreateTs := make(map[int64]uint64)

    fieldIndexes, ok := m.indexes[collID]
@@ -495,21 +505,22 @@ func (m *indexMeta) GetIndexIDByName(collID int64, indexName string) map[int64]u
}

func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID) *indexpb.SegmentIndexState {
    m.RLock()
    defer m.RUnlock()

    state := &indexpb.SegmentIndexState{
        SegmentID:  segmentID,
        State:      commonpb.IndexState_IndexStateNone,
        FailReason: "",
    }

    m.fieldIndexLock.RLock()
    fieldIndexes, ok := m.indexes[collID]
    if !ok {
        state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID)
        m.fieldIndexLock.RUnlock()
        return state
    }
    m.fieldIndexLock.RUnlock()

    indexes, ok := m.segmentIndexes[segmentID]
    indexes, ok := m.segmentIndexes.Get(segmentID)
    if !ok {
        state.State = commonpb.IndexState_Unissued
        state.FailReason = fmt.Sprintf("segment index not exist with ID: %d", segmentID)
@@ -517,7 +528,7 @@ func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID Uni
    }

    if index, ok := fieldIndexes[indexID]; ok && !index.IsDeleted {
        if segIdx, ok := indexes[indexID]; ok {
        if segIdx, ok := indexes.Get(indexID); ok {
            state.IndexName = index.IndexName
            state.State = segIdx.IndexState
            state.FailReason = segIdx.FailReason
@@ -532,24 +543,24 @@ func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID Uni
}

func (m *indexMeta) GetIndexedSegments(collectionID int64, segmentIDs, fieldIDs []UniqueID) []int64 {
    m.RLock()
    defer m.RUnlock()

    m.fieldIndexLock.RLock()
    fieldIndexes, ok := m.indexes[collectionID]
    if !ok {
        m.fieldIndexLock.RUnlock()
        return nil
    }
    m.fieldIndexLock.RUnlock()

    fieldIDSet := typeutil.NewUniqueSet(fieldIDs...)

    checkSegmentState := func(indexes map[int64]*model.SegmentIndex) bool {
    checkSegmentState := func(indexes *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]) bool {
        indexedFields := 0
        for indexID, index := range fieldIndexes {
            if !fieldIDSet.Contain(index.FieldID) || index.IsDeleted {
                continue
            }

            if segIdx, ok := indexes[indexID]; ok && segIdx.IndexState == commonpb.IndexState_Finished {
            if segIdx, ok := indexes.Get(indexID); ok && segIdx.IndexState == commonpb.IndexState_Finished {
                indexedFields += 1
            }
        }
@@ -559,7 +570,7 @@ func (m *indexMeta) GetIndexedSegments(collectionID int64, segmentIDs, fieldIDs

    ret := make([]int64, 0)
    for _, sid := range segmentIDs {
        if indexes, ok := m.segmentIndexes[sid]; ok {
        if indexes, ok := m.segmentIndexes.Get(sid); ok {
            if checkSegmentState(indexes) {
                ret = append(ret, sid)
            }
@@ -571,8 +582,8 @@ func (m *indexMeta) GetIndexedSegments(collectionID int64, segmentIDs, fieldIDs

// GetIndexesForCollection gets all indexes info with the specified collection.
func (m *indexMeta) GetIndexesForCollection(collID UniqueID, indexName string) []*model.Index {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    indexInfos := make([]*model.Index, 0)
    for _, index := range m.indexes[collID] {
@@ -587,8 +598,8 @@ func (m *indexMeta) GetIndexesForCollection(collID UniqueID, indexName string) [
}

func (m *indexMeta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) []*model.Index {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    indexInfos := make([]*model.Index, 0)
    for _, index := range m.indexes[collID] {
@@ -607,8 +618,8 @@ func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, ind
    log.Ctx(ctx).Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID),
        zap.Int64s("indexIDs", indexIDs))

    m.Lock()
    defer m.Unlock()
    m.fieldIndexLock.Lock()
    defer m.fieldIndexLock.Unlock()

    fieldIndexes, ok := m.indexes[collID]
    if !ok {
@@ -641,23 +652,23 @@ func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, ind
}

func (m *indexMeta) IsUnIndexedSegment(collectionID UniqueID, segID UniqueID) bool {
    m.RLock()
    defer m.RUnlock()

    m.fieldIndexLock.RLock()
    fieldIndexes, ok := m.indexes[collectionID]
    if !ok {
        m.fieldIndexLock.RUnlock()
        return false
    }
    m.fieldIndexLock.RUnlock()

    // the segment should be unindexed status if the fieldIndexes is not nil
    segIndexInfos, ok := m.segmentIndexes[segID]
    if !ok || len(segIndexInfos) == 0 {
    segIndexInfos, ok := m.segmentIndexes.Get(segID)
    if !ok || segIndexInfos.Len() == 0 {
        return true
    }

    for _, index := range fieldIndexes {
        if _, ok := segIndexInfos[index.IndexID]; !index.IsDeleted {
            if !ok {
        if !index.IsDeleted {
            if _, ok := segIndexInfos.Get(index.IndexID); !ok {
                // the segment should be unindexed status if the segment index is not found within field indexes
                return true
            }
@@ -668,8 +679,6 @@ func (m *indexMeta) IsUnIndexedSegment(collectionID UniqueID, segID UniqueID) bo
}

func (m *indexMeta) GetSegmentsIndexes(collectionID UniqueID, segIDs []UniqueID) map[int64]map[UniqueID]*model.SegmentIndex {
    m.RLock()
    defer m.RUnlock()
    segmentsIndexes := make(map[int64]map[UniqueID]*model.SegmentIndex)
    for _, segmentID := range segIDs {
        segmentsIndexes[segmentID] = m.getSegmentIndexes(collectionID, segmentID)
@@ -678,16 +687,14 @@ func (m *indexMeta) GetSegmentsIndexes(collectionID UniqueID, segIDs []UniqueID)
}

func (m *indexMeta) GetSegmentIndexes(collectionID UniqueID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
    m.RLock()
    defer m.RUnlock()
    return m.getSegmentIndexes(collectionID, segID)
}

// Note: thread-unsafe, don't call it outside indexMeta
func (m *indexMeta) getSegmentIndexes(collectionID UniqueID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
    ret := make(map[UniqueID]*model.SegmentIndex, 0)
    segIndexInfos, ok := m.segmentIndexes[segID]
    if !ok || len(segIndexInfos) == 0 {
    segIndexInfos, ok := m.segmentIndexes.Get(segID)
    if !ok || segIndexInfos.Len() == 0 {
        return ret
    }

@@ -696,7 +703,7 @@ func (m *indexMeta) getSegmentIndexes(collectionID UniqueID, segID UniqueID) map
        return ret
    }

    for _, segIdx := range segIndexInfos {
    for _, segIdx := range segIndexInfos.Values() {
        if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
            ret[segIdx.IndexID] = model.CloneSegmentIndex(segIdx)
        }
@@ -705,8 +712,8 @@ func (m *indexMeta) getSegmentIndexes(collectionID UniqueID, segID UniqueID) map
}

func (m *indexMeta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    if fieldIndexes, ok := m.indexes[collID]; ok {
        if index, ok := fieldIndexes[indexID]; ok {
@@ -717,8 +724,9 @@ func (m *indexMeta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
}

func (m *indexMeta) GetIndexNameByID(collID, indexID UniqueID) string {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    if fieldIndexes, ok := m.indexes[collID]; ok {
        if index, ok := fieldIndexes[indexID]; ok {
            return index.IndexName
@@ -728,8 +736,8 @@ func (m *indexMeta) GetIndexNameByID(collID, indexID UniqueID) string {
}

func (m *indexMeta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    fieldIndexes, ok := m.indexes[collID]
    if !ok {
@@ -749,8 +757,8 @@ func (m *indexMeta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValu
}

func (m *indexMeta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    fieldIndexes, ok := m.indexes[collID]
    if !ok {
@@ -770,9 +778,6 @@ func (m *indexMeta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValue
}

func (m *indexMeta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
    m.RLock()
    defer m.RUnlock()

    segIdx, ok := m.segmentBuildInfo.Get(buildID)
    if ok {
        return model.CloneSegmentIndex(segIdx), true
@@ -782,13 +787,9 @@ func (m *indexMeta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
}

func (m *indexMeta) IsIndexExist(collID, indexID UniqueID) bool {
    m.RLock()
    defer m.RUnlock()
    m.fieldIndexLock.RLock()
    defer m.fieldIndexLock.RUnlock()

    return m.isIndexExist(collID, indexID)
}

func (m *indexMeta) isIndexExist(collID, indexID UniqueID) bool {
    fieldIndexes, ok := m.indexes[collID]
    if !ok {
        return false
@@ -802,8 +803,8 @@ func (m *indexMeta) isIndexExist(collID, indexID UniqueID) bool {

// UpdateVersion updates the version and nodeID of the index meta, whenever the task is built once, the version will be updated once.
func (m *indexMeta) UpdateVersion(buildID, nodeID UniqueID) error {
    m.Lock()
    defer m.Unlock()
    m.keyLock.Lock(buildID)
    defer m.keyLock.Unlock(buildID)

    log.Ctx(m.ctx).Info("IndexCoord metaTable UpdateVersion receive", zap.Int64("buildID", buildID), zap.Int64("nodeID", nodeID))
    segIdx, ok := m.segmentBuildInfo.Get(buildID)
@@ -821,8 +822,8 @@ func (m *indexMeta) UpdateVersion(buildID, nodeID UniqueID) error {
}

func (m *indexMeta) FinishTask(taskInfo *workerpb.IndexTaskInfo) error {
    m.Lock()
    defer m.Unlock()
    m.keyLock.Lock(taskInfo.GetBuildID())
    defer m.keyLock.Unlock(taskInfo.GetBuildID())

    segIdx, ok := m.segmentBuildInfo.Get(taskInfo.GetBuildID())
    if !ok {
@@ -854,8 +855,8 @@ func (m *indexMeta) FinishTask(taskInfo *workerpb.IndexTaskInfo) error {
}

func (m *indexMeta) DeleteTask(buildID int64) error {
    m.Lock()
    defer m.Unlock()
    m.keyLock.Lock(buildID)
    defer m.keyLock.Unlock(buildID)

    segIdx, ok := m.segmentBuildInfo.Get(buildID)
    if !ok {
@@ -878,8 +879,8 @@ func (m *indexMeta) DeleteTask(buildID int64) error {

// BuildIndex set the index state to be InProgress. It means IndexNode is building the index.
func (m *indexMeta) BuildIndex(buildID UniqueID) error {
    m.Lock()
    defer m.Unlock()
    m.keyLock.Lock(buildID)
    defer m.keyLock.Unlock(buildID)

    segIdx, ok := m.segmentBuildInfo.Get(buildID)
    if !ok {
@ -905,21 +906,18 @@ func (m *indexMeta) BuildIndex(buildID UniqueID) error {
|
||||
}
|
||||
|
||||
func (m *indexMeta) GetAllSegIndexes() map[int64]*model.SegmentIndex {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
tasks := m.segmentBuildInfo.List()
|
||||
segIndexes := make(map[int64]*model.SegmentIndex, len(tasks))
|
||||
for buildID, segIndex := range tasks {
|
||||
segIndexes[buildID] = segIndex
|
||||
for _, segIndex := range tasks {
|
||||
segIndexes[segIndex.BuildID] = segIndex
|
||||
}
|
||||
return segIndexes
|
||||
}
|
||||
|
||||
// SetStoredIndexFileSizeMetric returns the total index files size of all segment for each collection.
func (m *indexMeta) SetStoredIndexFileSizeMetric(collections map[UniqueID]*collectionInfo) uint64 {
	m.RLock()
	defer m.RUnlock()
	m.fieldIndexLock.Lock()
	defer m.fieldIndexLock.Unlock()

	var total uint64
	metrics.DataCoordStoredIndexFilesSize.Reset()
@ -935,30 +933,41 @@ func (m *indexMeta) SetStoredIndexFileSizeMetric(collections map[UniqueID]*colle
	return total
}

func (m *indexMeta) RemoveSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
	m.Lock()
	defer m.Unlock()

func (m *indexMeta) removeSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
	err := m.catalog.DropSegmentIndex(ctx, collID, partID, segID, buildID)
	if err != nil {
		return err
	}

	if _, ok := m.segmentIndexes[segID]; ok {
		delete(m.segmentIndexes[segID], indexID)
	segIndexes, ok := m.segmentIndexes.Get(segID)
	if ok {
		segIndexes.Remove(indexID)
		m.segmentIndexes.Insert(segID, segIndexes)
	}

	if len(m.segmentIndexes[segID]) == 0 {
		delete(m.segmentIndexes, segID)
	if segIndexes.Len() == 0 {
		m.segmentIndexes.Remove(segID)
	}

	m.segmentBuildInfo.Remove(buildID)
	return nil
}

func (m *indexMeta) RemoveSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
	return m.removeSegmentIndex(ctx, collID, partID, segID, indexID, buildID)
}

func (m *indexMeta) RemoveSegmentIndexByID(ctx context.Context, buildID UniqueID) error {
	segIdx, ok := m.segmentBuildInfo.Get(buildID)
	if !ok {
		return nil
	}

	return m.removeSegmentIndex(ctx, segIdx.CollectionID, segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexID, buildID)
}

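The new segment-index bookkeeping is a two-level concurrent map (segment ID to index ID to `*model.SegmentIndex`), and `removeSegmentIndex` above drops the inner entry first, then the outer entry once the inner map is empty. The sketch below mirrors only the calls visible in this diff (`NewConcurrentMap`, `Insert`, `Get`, `Remove`, `Len`); the IDs are made up, and it only compiles inside the milvus module because `internal/metastore/model` is an internal package.

// Sketch of the segmentID -> indexID -> SegmentIndex shape used above.
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/metastore/model"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

func main() {
	segmentIndexes := typeutil.NewConcurrentMap[int64, *typeutil.ConcurrentMap[int64, *model.SegmentIndex]]()

	// Populate one segment with one index build (illustrative IDs).
	inner := typeutil.NewConcurrentMap[int64, *model.SegmentIndex]()
	inner.Insert(10, &model.SegmentIndex{SegmentID: 1000, IndexID: 10, BuildID: 10000})
	segmentIndexes.Insert(1000, inner)

	// Removal mirrors removeSegmentIndex: drop the index entry, then drop the
	// segment entry once its inner map is empty.
	if segIdx, ok := segmentIndexes.Get(1000); ok {
		segIdx.Remove(10)
		if segIdx.Len() == 0 {
			segmentIndexes.Remove(1000)
		}
	}
	fmt.Println("segments tracked:", segmentIndexes.Len()) // prints 0
}
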
func (m *indexMeta) GetDeletedIndexes() []*model.Index {
	m.RLock()
	defer m.RUnlock()
	m.fieldIndexLock.RLock()
	defer m.fieldIndexLock.RUnlock()

	deletedIndexes := make([]*model.Index, 0)
	for _, fieldIndexes := range m.indexes {
@ -972,8 +981,8 @@ func (m *indexMeta) GetDeletedIndexes() []*model.Index {
}

func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) error {
	m.Lock()
	defer m.Unlock()
	m.fieldIndexLock.Lock()
	defer m.fieldIndexLock.Unlock()
	log.Ctx(ctx).Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID))
	err := m.catalog.DropIndex(ctx, collID, indexID)
	if err != nil {
@ -995,9 +1004,6 @@ func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) e
}

func (m *indexMeta) CheckCleanSegmentIndex(buildID UniqueID) (bool, *model.SegmentIndex) {
	m.RLock()
	defer m.RUnlock()

	if segIndex, ok := m.segmentBuildInfo.Get(buildID); ok {
		if segIndex.IndexState == commonpb.IndexState_Finished {
			return true, model.CloneSegmentIndex(segIndex)
@ -1008,9 +1014,6 @@ func (m *indexMeta) CheckCleanSegmentIndex(buildID UniqueID) (bool, *model.Segme
}

func (m *indexMeta) getSegmentsIndexStates(collectionID UniqueID, segmentIDs []UniqueID) map[int64]map[int64]*indexpb.SegmentIndexState {
	m.RLock()
	defer m.RUnlock()

	ret := make(map[int64]map[int64]*indexpb.SegmentIndexState, 0)
	fieldIndexes, ok := m.indexes[collectionID]
	if !ok {
@ -1019,12 +1022,12 @@ func (m *indexMeta) getSegmentsIndexStates(collectionID UniqueID, segmentIDs []U

	for _, segID := range segmentIDs {
		ret[segID] = make(map[int64]*indexpb.SegmentIndexState)
		segIndexInfos, ok := m.segmentIndexes[segID]
		if !ok || len(segIndexInfos) == 0 {
		segIndexInfos, ok := m.segmentIndexes.Get(segID)
		if !ok || segIndexInfos.Len() == 0 {
			continue
		}

		for _, segIdx := range segIndexInfos {
		for _, segIdx := range segIndexInfos.Values() {
			if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
				ret[segID][segIdx.IndexID] = &indexpb.SegmentIndexState{
					SegmentID: segID,
@ -1077,8 +1080,9 @@ func (m *indexMeta) AreAllDiskIndex(collectionID int64, schema *schemapb.Collect
}

func (m *indexMeta) HasIndex(collectionID int64) bool {
	m.RLock()
	defer m.RUnlock()
	m.fieldIndexLock.RLock()
	defer m.fieldIndexLock.RUnlock()

	indexes, ok := m.indexes[collectionID]
	if ok {
		for _, index := range indexes {
@ -1100,8 +1104,8 @@ func (m *indexMeta) TaskStatsJSON() string {
}

func (m *indexMeta) GetIndexJSON(collectionID int64) string {
	m.RLock()
	defer m.RUnlock()
	m.fieldIndexLock.RLock()
	defer m.fieldIndexLock.RUnlock()

	var indexMetrics []*metricsinfo.Index
	for collID, indexes := range m.indexes {
@ -1131,24 +1135,25 @@ func (m *indexMeta) GetIndexJSON(collectionID int64) string {
}

func (m *indexMeta) GetSegmentIndexedFields(collectionID UniqueID, segmentID UniqueID) (bool, []*metricsinfo.IndexedField) {
	m.RLock()
	defer m.RUnlock()
	m.fieldIndexLock.RLock()
	fieldIndexes, ok := m.indexes[collectionID]
	if !ok {
		// the segment should be unindexed status if the collection has no indexes
		m.fieldIndexLock.RUnlock()
		return false, []*metricsinfo.IndexedField{}
	}
	m.fieldIndexLock.RUnlock()

	// the segment should be unindexed status if the segment indexes is not found
	segIndexInfos, ok := m.segmentIndexes[segmentID]
	if !ok || len(segIndexInfos) == 0 {
	segIndexInfos, ok := m.segmentIndexes.Get(segmentID)
	if !ok || segIndexInfos.Len() == 0 {
		return false, []*metricsinfo.IndexedField{}
	}

	isIndexed := true
	var segmentIndexes []*metricsinfo.IndexedField
	for _, index := range fieldIndexes {
		if si, ok := segIndexInfos[index.IndexID]; !index.IsDeleted {
		if si, ok := segIndexInfos.Get(index.IndexID); !index.IsDeleted {
			buildID := int64(-1)
			if !ok {
				// the segment should be unindexed status if the segment index is not found within field indexes

@ -19,7 +19,6 @@ package datacoord
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -37,7 +36,9 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/common"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/lock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
func TestReloadFromKV(t *testing.T) {
|
||||
@ -442,12 +443,12 @@ func TestMeta_HasSameReq(t *testing.T) {
|
||||
|
||||
func newSegmentIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
	return &indexMeta{
		RWMutex:          sync.RWMutex{},
		keyLock:          lock.NewKeyLock[UniqueID](),
		ctx:              context.Background(),
		catalog:          catalog,
		indexes:          make(map[UniqueID]map[UniqueID]*model.Index),
		segmentBuildInfo: newSegmentIndexBuildInfo(),
		segmentIndexes:   make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
		segmentIndexes:   typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
	}
}

@ -516,9 +517,8 @@ func TestMeta_AddSegmentIndex(t *testing.T) {
|
||||
).Return(errors.New("fail"))
|
||||
|
||||
m := newSegmentIndexMeta(ec)
|
||||
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: make(map[UniqueID]*model.SegmentIndex, 0),
|
||||
}
|
||||
m.segmentIndexes = typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
m.segmentIndexes.Insert(1, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())
|
||||
|
||||
segmentIndex := &model.SegmentIndex{
|
||||
SegmentID: 1,
|
||||
@ -630,9 +630,8 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
|
||||
metakv.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything).Return(nil, nil, nil).Maybe()
|
||||
|
||||
m := newSegmentIndexMeta(&datacoord.Catalog{MetaKv: metakv})
|
||||
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: make(map[UniqueID]*model.SegmentIndex, 0),
|
||||
}
|
||||
m.segmentIndexes = typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
m.segmentIndexes.Insert(1, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())
|
||||
|
||||
t.Run("collection has no index", func(t *testing.T) {
|
||||
state := m.GetSegmentIndexState(collID, segID, indexID)
|
||||
@ -735,26 +734,25 @@ func TestMeta_GetIndexedSegment(t *testing.T) {
|
||||
)
|
||||
|
||||
m := newSegmentIndexMeta(nil)
|
||||
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdxes.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 10,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
|
||||
m.segmentIndexes.Insert(segID, segIdxes)
|
||||
m.indexes = map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -891,27 +889,17 @@ func TestMeta_GetSegmentIndexes(t *testing.T) {
|
||||
|
||||
t.Run("no index exist- field index empty", func(t *testing.T) {
|
||||
m := newSegmentIndexMeta(nil)
|
||||
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: {
|
||||
1: &model.SegmentIndex{},
|
||||
},
|
||||
}
|
||||
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdxes.Insert(indexID, &model.SegmentIndex{})
|
||||
m.segmentIndexes.Insert(segID, segIdxes)
|
||||
|
||||
segIndexes := m.GetSegmentIndexes(collID, 1)
|
||||
assert.Equal(t, 0, len(segIndexes))
|
||||
})
|
||||
|
||||
t.Run("index exists", func(t *testing.T) {
|
||||
m := &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: &model.SegmentIndex{
|
||||
CollectionID: collID,
|
||||
SegmentID: segID,
|
||||
IndexID: indexID,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -930,6 +918,14 @@ func TestMeta_GetSegmentIndexes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdxes.Insert(indexID, &model.SegmentIndex{
|
||||
CollectionID: collID,
|
||||
SegmentID: segID,
|
||||
IndexID: indexID,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
})
|
||||
m.segmentIndexes.Insert(segID, segIdxes)
|
||||
segIndexes := m.GetSegmentIndexes(collID, segID)
|
||||
assert.Equal(t, 1, len(segIndexes))
|
||||
|
||||
@ -1200,28 +1196,10 @@ func updateSegmentIndexMeta(t *testing.T) *indexMeta {
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
|
||||
return &indexMeta{
|
||||
catalog: sc,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
m := &indexMeta{
|
||||
catalog: sc,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -1241,6 +1219,25 @@ func updateSegmentIndexMeta(t *testing.T) *indexMeta {
|
||||
},
|
||||
segmentBuildInfo: indexBuildInfo,
|
||||
}
|
||||
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdxes.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
m.segmentIndexes.Insert(segID, segIdxes)
|
||||
return m
|
||||
}
|
||||
|
||||
func TestMeta_UpdateVersion(t *testing.T) {
|
||||
@ -1346,10 +1343,11 @@ func TestUpdateSegmentIndexNotExists(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
assert.Equal(t, 1, len(m.segmentIndexes))
|
||||
segmentIdx := m.segmentIndexes[1]
|
||||
assert.Equal(t, 1, len(segmentIdx))
|
||||
_, ok := segmentIdx[2]
|
||||
assert.Equal(t, 1, m.segmentIndexes.Len())
|
||||
segmentIdx, ok := m.segmentIndexes.Get(1)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 1, segmentIdx.Len())
|
||||
_, ok = segmentIdx.Get(2)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
@ -1488,19 +1486,67 @@ func TestRemoveSegmentIndex(t *testing.T) {
|
||||
Return(nil)
|
||||
|
||||
m := &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[int64]map[int64]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: &model.SegmentIndex{},
|
||||
},
|
||||
},
|
||||
catalog: catalog,
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
}
|
||||
m.segmentIndexes.Insert(segID, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())
|
||||
|
||||
err := m.RemoveSegmentIndex(context.TODO(), collID, partID, segID, indexID, buildID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, len(m.segmentIndexes), 0)
|
||||
assert.Equal(t, 0, m.segmentIndexes.Len())
|
||||
assert.Equal(t, len(m.segmentBuildInfo.List()), 0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemoveSegmentIndexByID(t *testing.T) {
|
||||
t.Run("drop segment index fail", func(t *testing.T) {
|
||||
expectedErr := errors.New("error")
|
||||
catalog := catalogmocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().
|
||||
DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(expectedErr)
|
||||
|
||||
catalog.EXPECT().CreateSegmentIndex(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
m := newSegmentIndexMeta(catalog)
|
||||
err := m.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 1024,
|
||||
IndexID: 1,
|
||||
BuildID: 4,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
err = m.RemoveSegmentIndexByID(context.TODO(), 4)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "error")
|
||||
})
|
||||
|
||||
t.Run("remove segment index ok", func(t *testing.T) {
|
||||
catalog := catalogmocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().
|
||||
DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
catalog.EXPECT().CreateSegmentIndex(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
m := newSegmentIndexMeta(catalog)
|
||||
err := m.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 1024,
|
||||
IndexID: 1,
|
||||
BuildID: 4,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = m.RemoveSegmentIndexByID(context.TODO(), 4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, m.segmentIndexes.Len(), 0)
|
||||
assert.Equal(t, len(m.segmentBuildInfo.List()), 0)
|
||||
})
|
||||
}
|
||||
@ -1639,27 +1685,25 @@ func TestMeta_GetSegmentIndexStatus(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10250,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 1,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
segID + 1: {},
|
||||
}
|
||||
m.segmentIndexes = typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10250,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 1,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
m.segmentIndexes.Insert(segID, segIdx)
|
||||
|
||||
t.Run("index exists", func(t *testing.T) {
|
||||
isIndexed, segmentIndexes := m.GetSegmentIndexedFields(collID, segID)
|
||||
|
||||
@ -47,6 +47,7 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
func TestServerId(t *testing.T) {
|
||||
@ -398,159 +399,159 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 1: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 3: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 4: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 5: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID - 1: {
|
||||
indexID: {
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 1: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 3: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 4: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 5: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
indexMeta.segmentIndexes.Insert(segID-1, segIdx2)
|
||||
|
||||
mockHandler := NewNMockHandler(t)
|
||||
|
||||
@ -804,7 +805,7 @@ func TestServer_GetIndexState(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -861,31 +862,29 @@ func TestServer_GetIndexState(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 3000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_IndexStateNone,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
}
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 3000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_IndexStateNone,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx)
|
||||
for id, segment := range segments {
|
||||
s.meta.segments.SetSegment(id, segment)
|
||||
}
|
||||
@ -1404,158 +1403,7 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 1: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 3: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 4: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 5: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID - 1: {
|
||||
indexID: {
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 1: {
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 3: {
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 4: {
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 5: {
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -1563,6 +1411,158 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
segIdx2.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID-1, segIdx2)
|
||||
|
||||
for id, segment := range segments {
|
||||
s.meta.segments.SetSegment(id, segment)
|
||||
}
|
||||
@ -1720,7 +1720,7 @@ func TestServer_ListIndexes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -1909,95 +1909,7 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 1: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 3: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 4: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 5: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -2005,6 +1917,93 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 1,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 3,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 4,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx1.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID + 5,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
for id, segment := range segments {
|
||||
s.meta.segments.SetSegment(id, segment)
|
||||
}
|
||||
@ -2156,7 +2155,7 @@ func TestServer_DropIndex(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -2301,27 +2300,7 @@ func TestServer_GetIndexInfos(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -2330,6 +2309,25 @@ func TestServer_GetIndexInfos(t *testing.T) {
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: createTS,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
s.meta.segments.SetSegment(segID, &SegmentInfo{
|
||||
SegmentInfo: &datapb.SegmentInfo{
|
||||
ID: segID,
|
||||
@ -2395,7 +2393,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -2438,7 +2436,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
|
||||
assert.Equal(t, 1, len(segments))
|
||||
assert.Equal(t, segID, segments[0].ID)
|
||||
|
||||
m.indexMeta.segmentIndexes[segID] = make(map[UniqueID]*model.SegmentIndex)
|
||||
m.indexMeta.segmentIndexes.Insert(segID, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())
|
||||
m.indexMeta.updateSegmentIndex(&model.SegmentIndex{
|
||||
CollectionID: collID,
|
||||
SegmentID: segID,
|
||||
|
||||
@ -16,6 +16,8 @@ import (
|
||||
"github.com/milvus-io/milvus/internal/metastore/mocks"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/lock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
type jobManagerSuite struct {
|
||||
@ -89,9 +91,11 @@ func (s *jobManagerSuite) TestJobManager_triggerStatsTaskLoop() {
|
||||
},
|
||||
},
|
||||
statsTaskMeta: &statsTaskMeta{
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
tasks: make(map[int64]*indexpb.StatsTask),
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
},
|
||||
}
|
||||
|
||||
@ -103,7 +107,7 @@ func (s *jobManagerSuite) TestJobManager_triggerStatsTaskLoop() {
|
||||
scheduler: &taskScheduler{
|
||||
allocator: alloc,
|
||||
pendingTasks: newFairQueuePolicy(),
|
||||
runningTasks: make(map[UniqueID]Task),
|
||||
runningTasks: typeutil.NewConcurrentMap[UniqueID, Task](),
|
||||
meta: mt,
|
||||
taskStats: expirable.NewLRU[UniqueID, Task](512, nil, time.Minute*5),
|
||||
},
|
||||
|
||||
@ -649,6 +649,25 @@ func TestGetDistJSON(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServer_getSegmentsJSON(t *testing.T) {
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(10, &model.SegmentIndex{
|
||||
SegmentID: 1000,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 10250,
|
||||
IndexID: 10,
|
||||
BuildID: 10000,
|
||||
NodeID: 1,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
segIndexes.Insert(1000, segIdx0)
|
||||
s := &Server{
|
||||
meta: &meta{
|
||||
segments: &SegmentsInfo{
|
||||
@ -664,26 +683,7 @@ func TestServer_getSegmentsJSON(t *testing.T) {
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1000: {
|
||||
10: &model.SegmentIndex{
|
||||
SegmentID: 1000,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 10250,
|
||||
IndexID: 10,
|
||||
BuildID: 10000,
|
||||
NodeID: 1,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
1: {
|
||||
10: &model.Index{
|
||||
|
||||
@ -19,36 +19,40 @@ package datacoord
import (
	"context"
	"fmt"
	"strconv"
	"sync"

	"go.uber.org/zap"
	"google.golang.org/protobuf/proto"
	"strconv"

	"github.com/milvus-io/milvus/internal/metastore"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/metrics"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
	"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
	"github.com/milvus-io/milvus/pkg/v2/util/lock"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
	"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type statsTaskMeta struct {
	sync.RWMutex

	ctx     context.Context
	catalog metastore.DataCoordCatalog

	// taskID -> analyzeStats
	tasks map[int64]*indexpb.StatsTask
	keyLock *lock.KeyLock[UniqueID]
	// taskID -> statsTask
	tasks *typeutil.ConcurrentMap[UniqueID, *indexpb.StatsTask]

	// segmentID + SubJobType -> statsTask
	segmentID2Tasks *typeutil.ConcurrentMap[string, *indexpb.StatsTask]
}

func newStatsTaskMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*statsTaskMeta, error) {
	stm := &statsTaskMeta{
		ctx:     ctx,
		catalog: catalog,
		tasks:   make(map[int64]*indexpb.StatsTask),
		ctx:             ctx,
		catalog:         catalog,
		keyLock:         lock.NewKeyLock[UniqueID](),
		tasks:           typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
		segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
	}
	if err := stm.reloadFromKV(); err != nil {
		return nil, err
@ -56,6 +60,10 @@ func newStatsTaskMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (
	return stm, nil
}

func createSecondaryIndexKey(segmentID UniqueID, subJobType string) string {
	return strconv.FormatUint(uint64(segmentID), 10) + "-" + subJobType
}

func (stm *statsTaskMeta) reloadFromKV() error {
	record := timerecord.NewTimeRecorder("statsTaskMeta-reloadFromKV")
	// load stats task
@ -65,7 +73,10 @@ func (stm *statsTaskMeta) reloadFromKV() error {
		return err
	}
	for _, t := range statsTasks {
		stm.tasks[t.GetTaskID()] = t
		stm.tasks.Insert(t.GetTaskID(), t)

		secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
		stm.segmentID2Tasks.Insert(secondaryKey, t)
	}

	log.Info("statsTaskMeta reloadFromKV done", zap.Duration("duration", record.ElapseSpan()))
@ -73,58 +84,54 @@ func (stm *statsTaskMeta) reloadFromKV() error {
}

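`createSecondaryIndexKey` above gives `statsTaskMeta` a second lookup path: besides `tasks` (task ID to task), `segmentID2Tasks` is keyed by "segmentID-subJobType", so finding the task for a segment and sub-job type becomes a single lookup instead of a scan over every task. A stripped-down sketch of the same scheme, with a stand-in task struct and a made-up sub-job type string:

// Sketch of the composite secondary key, standard library only.
package main

import (
	"fmt"
	"strconv"
)

type statsTask struct {
	TaskID     int64
	SegmentID  int64
	SubJobType string
}

// secondaryKey matches the "segmentID-subJobType" format of createSecondaryIndexKey.
func secondaryKey(segmentID int64, subJobType string) string {
	return strconv.FormatUint(uint64(segmentID), 10) + "-" + subJobType
}

func main() {
	bySegment := map[string]*statsTask{}

	t := &statsTask{TaskID: 1, SegmentID: 1000, SubJobType: "TextIndex"} // illustrative values
	bySegment[secondaryKey(t.SegmentID, t.SubJobType)] = t

	// Duplicate detection is now a single lookup instead of a full scan.
	if existing, ok := bySegment[secondaryKey(1000, "TextIndex")]; ok {
		fmt.Println("duplicate of task", existing.TaskID)
	}
}
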
func (stm *statsTaskMeta) updateMetrics() {
	stm.RLock()
	defer stm.RUnlock()

	taskMetrics := make(map[UniqueID]map[indexpb.JobState]int)
	for _, t := range stm.tasks {
		if _, ok := taskMetrics[t.GetCollectionID()]; !ok {
			taskMetrics[t.GetCollectionID()] = make(map[indexpb.JobState]int)
			taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateNone] = 0
			taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateInit] = 0
			taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateInProgress] = 0
			taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateFinished] = 0
			taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateFailed] = 0
			taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateRetry] = 0
		}
		taskMetrics[t.GetCollectionID()][t.GetState()]++
	taskMetrics := make(map[indexpb.JobState]int)
	taskMetrics[indexpb.JobState_JobStateNone] = 0
	taskMetrics[indexpb.JobState_JobStateInit] = 0
	taskMetrics[indexpb.JobState_JobStateInProgress] = 0
	taskMetrics[indexpb.JobState_JobStateFinished] = 0
	taskMetrics[indexpb.JobState_JobStateFailed] = 0
	taskMetrics[indexpb.JobState_JobStateRetry] = 0
	allTasks := stm.tasks.Values()
	for _, t := range allTasks {
		taskMetrics[t.GetState()]++
	}

	jobType := indexpb.JobType_JobTypeStatsJob.String()
	for collID, m := range taskMetrics {
		for k, v := range m {
			metrics.TaskNum.WithLabelValues(strconv.FormatInt(collID, 10), jobType, k.String()).Set(float64(v))
		}
	for k, v := range taskMetrics {
		metrics.TaskNum.WithLabelValues(jobType, k.String()).Set(float64(v))
	}
}

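After the change above, `updateMetrics` publishes one gauge sample per job state for the stats job type, without the per-collection label. The sketch below shows the same shape with the standard Prometheus client; the metric name, label names, and counts are illustrative and not the actual `metrics.TaskNum` definition.

// Sketch of a (job_type, state) gauge, assuming only prometheus/client_golang.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	taskNum := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "task_num", Help: "number of tasks per state"},
		[]string{"job_type", "state"},
	)
	prometheus.MustRegister(taskNum)

	// Count tasks per state, then publish one gauge value per state.
	states := []string{"JobStateInit", "JobStateInProgress", "JobStateFinished"}
	counts := map[string]int{"JobStateInit": 2, "JobStateFinished": 5}
	for _, s := range states {
		taskNum.WithLabelValues("JobTypeStatsJob", s).Set(float64(counts[s]))
	}
	fmt.Println("published", len(states), "gauge samples")
}
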
func (stm *statsTaskMeta) AddStatsTask(t *indexpb.StatsTask) error {
	stm.Lock()
	defer stm.Unlock()
	taskID := t.GetTaskID()

	for _, st := range stm.tasks {
		if st.GetTaskID() == t.GetTaskID() || (st.GetSegmentID() == t.GetSegmentID() && st.GetSubJobType() == t.GetSubJobType() && st.GetState() != indexpb.JobState_JobStateFailed) {
			msg := fmt.Sprintf("stats task already exist in meta of segment %d with subJobType: %s",
				t.GetSegmentID(), t.GetSubJobType().String())
			log.RatedWarn(10, msg, zap.Int64("taskID", t.GetTaskID()), zap.Int64("exist taskID", st.GetTaskID()))
			return merr.WrapErrTaskDuplicate(indexpb.JobType_JobTypeStatsJob.String(), msg)
		}
	secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
	task, alreadyExist := stm.segmentID2Tasks.Get(secondaryKey)
	if alreadyExist {
		msg := fmt.Sprintf("stats task already exist in meta of segment %d with subJobType: %s",
			t.GetSegmentID(), t.GetSubJobType().String())
		log.RatedWarn(10, msg, zap.Int64("taskID", t.GetTaskID()), zap.Int64("exist taskID", task.GetTaskID()))
		return merr.WrapErrTaskDuplicate(indexpb.JobType_JobTypeStatsJob.String(), msg)
	}

	stm.keyLock.Lock(taskID)
	defer stm.keyLock.Unlock(taskID)

	log.Info("add stats task", zap.Int64("taskID", t.GetTaskID()), zap.Int64("originSegmentID", t.GetSegmentID()),
		zap.Int64("targetSegmentID", t.GetTargetSegmentID()), zap.String("subJobType", t.GetSubJobType().String()))
	t.State = indexpb.JobState_JobStateInit

	if err := stm.catalog.SaveStatsTask(stm.ctx, t); err != nil {
		log.Warn("adding stats task failed",
			zap.Int64("taskID", t.GetTaskID()),
			zap.Int64("taskID", taskID),
			zap.Int64("segmentID", t.GetSegmentID()),
			zap.String("subJobType", t.GetSubJobType().String()),
			zap.Error(err))
		return err
	}

	stm.tasks[t.GetTaskID()] = t
	stm.tasks.Insert(taskID, t)
	stm.segmentID2Tasks.Insert(secondaryKey, t)

	log.Info("add stats task success", zap.Int64("taskID", t.GetTaskID()), zap.Int64("originSegmentID", t.GetSegmentID()),
		zap.Int64("targetSegmentID", t.GetTargetSegmentID()), zap.String("subJobType", t.GetSubJobType().String()))
@ -132,12 +139,12 @@ func (stm *statsTaskMeta) AddStatsTask(t *indexpb.StatsTask) error {
}

func (stm *statsTaskMeta) DropStatsTask(taskID int64) error {
	stm.Lock()
	defer stm.Unlock()
	stm.keyLock.Lock(taskID)
	defer stm.keyLock.Unlock(taskID)

	log.Info("drop stats task by taskID", zap.Int64("taskID", taskID))

	t, ok := stm.tasks[taskID]
	t, ok := stm.tasks.Get(taskID)
	if !ok {
		log.Info("remove stats task success, task already not exist", zap.Int64("taskID", taskID))
		return nil
@ -150,17 +157,19 @@ func (stm *statsTaskMeta) DropStatsTask(taskID int64) error {
		return err
	}

	delete(stm.tasks, taskID)
	stm.tasks.Remove(taskID)
	secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
	stm.segmentID2Tasks.Remove(secondaryKey)

	log.Info("remove stats task success", zap.Int64("taskID", taskID), zap.Int64("segmentID", t.SegmentID))
	log.Info("remove stats task success", zap.Int64("taskID", taskID))
	return nil
}

func (stm *statsTaskMeta) UpdateVersion(taskID, nodeID int64) error {
	stm.Lock()
	defer stm.Unlock()
	stm.keyLock.Lock(taskID)
	defer stm.keyLock.Unlock(taskID)

	t, ok := stm.tasks[taskID]
	t, ok := stm.tasks.Get(taskID)
	if !ok {
		return fmt.Errorf("task %d not found", taskID)
	}
@ -178,17 +187,19 @@ func (stm *statsTaskMeta) UpdateVersion(taskID, nodeID int64) error {
		return err
	}

	stm.tasks[t.TaskID] = cloneT
	stm.tasks.Insert(taskID, cloneT)
	secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
	stm.segmentID2Tasks.Insert(secondaryKey, cloneT)
	log.Info("update stats task version success", zap.Int64("taskID", taskID), zap.Int64("nodeID", nodeID),
		zap.Int64("newVersion", cloneT.GetVersion()))
	return nil
}

func (stm *statsTaskMeta) UpdateBuildingTask(taskID int64) error {
	stm.Lock()
	defer stm.Unlock()
	stm.keyLock.Lock(taskID)
	defer stm.keyLock.Unlock(taskID)

	t, ok := stm.tasks[taskID]
	t, ok := stm.tasks.Get(taskID)
	if !ok {
		return fmt.Errorf("task %d not found", taskID)
	}
@ -204,17 +215,19 @@ func (stm *statsTaskMeta) UpdateBuildingTask(taskID int64) error {
		return err
	}

	stm.tasks[t.TaskID] = cloneT
	stm.tasks.Insert(taskID, cloneT)
	secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
	stm.segmentID2Tasks.Insert(secondaryKey, cloneT)

	log.Info("update building stats task success", zap.Int64("taskID", taskID))
	return nil
}

func (stm *statsTaskMeta) FinishTask(taskID int64, result *workerpb.StatsResult) error {
	stm.Lock()
	defer stm.Unlock()
	stm.keyLock.Lock(taskID)
	defer stm.keyLock.Unlock(taskID)

	t, ok := stm.tasks[taskID]
	t, ok := stm.tasks.Get(taskID)
	if !ok {
		return fmt.Errorf("task %d not found", taskID)
	}
@ -231,77 +244,70 @@ func (stm *statsTaskMeta) FinishTask(taskID int64, result *workerpb.StatsResult)
		return err
	}

	stm.tasks[t.TaskID] = cloneT
	stm.tasks.Insert(taskID, cloneT)
	secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
	stm.segmentID2Tasks.Insert(secondaryKey, cloneT)

	log.Info("finish stats task meta success", zap.Int64("taskID", taskID), zap.Int64("segmentID", t.SegmentID),
		zap.String("state", result.GetState().String()), zap.String("failReason", t.GetFailReason()))
	return nil
}

func (stm *statsTaskMeta) GetStatsTaskState(taskID int64) indexpb.JobState {
	stm.RLock()
	defer stm.RUnlock()
func (stm *statsTaskMeta) GetStatsTask(taskID int64) *indexpb.StatsTask {
|
||||
t, _ := stm.tasks.Get(taskID)
|
||||
return t
|
||||
}
|
||||
|
||||
t, ok := stm.tasks[taskID]
|
||||
func (stm *statsTaskMeta) GetStatsTaskState(taskID int64) indexpb.JobState {
|
||||
t, ok := stm.tasks.Get(taskID)
|
||||
if !ok {
|
||||
return indexpb.JobState_JobStateNone
|
||||
}
|
||||
return t.GetState()
|
||||
}
|
||||
|
||||
func (stm *statsTaskMeta) GetStatsTaskStateBySegmentID(segmentID int64, jobType indexpb.StatsSubJob) indexpb.JobState {
|
||||
stm.RLock()
|
||||
defer stm.RUnlock()
|
||||
func (stm *statsTaskMeta) GetStatsTaskStateBySegmentID(segmentID int64, subJobType indexpb.StatsSubJob) indexpb.JobState {
|
||||
state := indexpb.JobState_JobStateNone
|
||||
|
||||
for _, t := range stm.tasks {
|
||||
if segmentID == t.GetSegmentID() && jobType == t.GetSubJobType() {
|
||||
return t.GetState()
|
||||
}
|
||||
secondaryKey := createSecondaryIndexKey(segmentID, subJobType.String())
|
||||
t, exists := stm.segmentID2Tasks.Get(secondaryKey)
|
||||
if exists {
|
||||
state = t.GetState()
|
||||
}
|
||||
|
||||
return indexpb.JobState_JobStateNone
|
||||
return state
|
||||
}
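Editor's note: the segmentID2Tasks lookups above rely on a composite key built by createSecondaryIndexKey, whose body is not part of these hunks. A plausible sketch of the idea (the exact key format in the repository may differ) is simply the segment ID and sub-job type joined into one string, which turns the former linear scan over stm.tasks into a single map lookup.

package example

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

// createSecondaryIndexKeySketch is a hypothetical illustration only; the real
// createSecondaryIndexKey may use a different separator or format.
func createSecondaryIndexKeySketch(segmentID int64, subJobType string) string {
	return fmt.Sprintf("%d-%s", segmentID, subJobType)
}

// getStateBySegment does one Get on the secondary ConcurrentMap instead of
// ranging over every stats task.
func getStateBySegment(segmentID2Tasks *typeutil.ConcurrentMap[string, *indexpb.StatsTask],
	segmentID int64, subJobType indexpb.StatsSubJob) indexpb.JobState {
	key := createSecondaryIndexKeySketch(segmentID, subJobType.String())
	if t, ok := segmentID2Tasks.Get(key); ok {
		return t.GetState()
	}
	return indexpb.JobState_JobStateNone
}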

func (stm *statsTaskMeta) CanCleanedTasks() []int64 {
stm.RLock()
defer stm.RUnlock()

needCleanedTaskIDs := make([]int64, 0)
for taskID, t := range stm.tasks {
if t.GetCanRecycle() && (t.GetState() == indexpb.JobState_JobStateFinished ||
t.GetState() == indexpb.JobState_JobStateFailed) {
needCleanedTaskIDs = append(needCleanedTaskIDs, taskID)
stm.tasks.Range(func(key UniqueID, value *indexpb.StatsTask) bool {
if value.GetCanRecycle() && (value.GetState() == indexpb.JobState_JobStateFinished ||
value.GetState() == indexpb.JobState_JobStateFailed) {
needCleanedTaskIDs = append(needCleanedTaskIDs, key)
}
}
return true
})
return needCleanedTaskIDs
}

func (stm *statsTaskMeta) GetAllTasks() map[int64]*indexpb.StatsTask {
tasks := make(map[int64]*indexpb.StatsTask)

stm.RLock()
defer stm.RUnlock()
for k, v := range stm.tasks {
tasks[k] = proto.Clone(v).(*indexpb.StatsTask)
allTasks := stm.tasks.Values()
for _, v := range allTasks {
tasks[v.GetTaskID()] = proto.Clone(v).(*indexpb.StatsTask)
}
return tasks
}

func (stm *statsTaskMeta) GetStatsTaskBySegmentID(segmentID int64, subJobType indexpb.StatsSubJob) *indexpb.StatsTask {
stm.RLock()
defer stm.RUnlock()

log.Info("get stats task by segmentID", zap.Int64("segmentID", segmentID),
zap.String("subJobType", subJobType.String()))

for taskID, t := range stm.tasks {
if t.GetSegmentID() == segmentID && t.GetSubJobType() == subJobType {
log.Info("get stats task by segmentID success",
zap.Int64("taskID", taskID),
zap.Int64("segmentID", segmentID),
zap.String("subJobType", subJobType.String()))
return t
}
secondaryKey := createSecondaryIndexKey(segmentID, subJobType.String())
t, exists := stm.segmentID2Tasks.Get(secondaryKey)
if exists {
log.Info("get stats task by segmentID success",
zap.Int64("taskID", t.GetTaskID()),
zap.Int64("segmentID", segmentID),
zap.String("subJobType", subJobType.String()))
return t
}

log.Info("get stats task by segmentID failed, task not exist", zap.Int64("segmentID", segmentID),
@ -310,12 +316,9 @@ func (stm *statsTaskMeta) GetStatsTaskBySegmentID(segmentID int64, subJobType in
}

func (stm *statsTaskMeta) MarkTaskCanRecycle(taskID int64) error {
stm.Lock()
defer stm.Unlock()

log.Info("mark stats task can recycle", zap.Int64("taskID", taskID))

t, ok := stm.tasks[taskID]
t, ok := stm.tasks.Get(taskID)
if !ok {
return fmt.Errorf("task %d not found", taskID)
}
@ -331,7 +334,9 @@ func (stm *statsTaskMeta) MarkTaskCanRecycle(taskID int64) error {
return err
}

stm.tasks[t.TaskID] = cloneT
stm.tasks.Insert(taskID, cloneT)
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Insert(secondaryKey, cloneT)

log.Info("mark stats task can recycle success", zap.Int64("taskID", taskID),
zap.Int64("segmentID", t.SegmentID),

@ -108,7 +108,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()

s.Error(m.AddStatsTask(t))
_, ok := m.tasks[1]
_, ok := m.tasks.Get(1)
s.False(ok)
})

@ -116,13 +116,13 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()

s.NoError(m.AddStatsTask(t))
_, ok := m.tasks[1]
_, ok := m.tasks.Get(1)
s.True(ok)
})

s.Run("already exist", func() {
s.Error(m.AddStatsTask(t))
_, ok := m.tasks[1]
_, ok := m.tasks.Get(1)
s.True(ok)
})
})
@ -132,13 +132,13 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()

s.NoError(m.UpdateVersion(1, 1180))
task, ok := m.tasks[1]
task, ok := m.tasks.Get(1)
s.True(ok)
s.Equal(int64(1), task.GetVersion())
})

s.Run("task not exist", func() {
_, ok := m.tasks[100]
_, ok := m.tasks.Get(100)
s.False(ok)

s.Error(m.UpdateVersion(100, 1180))
@ -148,7 +148,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()

s.Error(m.UpdateVersion(1, 1180))
task, ok := m.tasks[1]
task, ok := m.tasks.Get(1)
s.True(ok)
// still 1
s.Equal(int64(1), task.GetVersion())
@ -160,7 +160,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()

s.Error(m.UpdateBuildingTask(1))
task, ok := m.tasks[1]
task, ok := m.tasks.Get(1)
s.True(ok)
s.Equal(indexpb.JobState_JobStateInit, task.GetState())
s.Equal(int64(1180), task.GetNodeID())
@ -170,14 +170,14 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()

s.NoError(m.UpdateBuildingTask(1))
task, ok := m.tasks[1]
task, ok := m.tasks.Get(1)
s.True(ok)
s.Equal(indexpb.JobState_JobStateInProgress, task.GetState())
s.Equal(int64(1180), task.GetNodeID())
})

s.Run("task not exist", func() {
_, ok := m.tasks[100]
_, ok := m.tasks.Get(100)
s.False(ok)

s.Error(m.UpdateBuildingTask(100))
@ -217,7 +217,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()

s.Error(m.FinishTask(1, result))
task, ok := m.tasks[1]
task, ok := m.tasks.Get(1)
s.True(ok)
s.Equal(indexpb.JobState_JobStateInProgress, task.GetState())
})
@ -226,7 +226,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()

s.NoError(m.FinishTask(1, result))
task, ok := m.tasks[1]
task, ok := m.tasks.Get(1)
s.True(ok)
s.Equal(indexpb.JobState_JobStateFinished, task.GetState())
})
@ -268,7 +268,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()

s.Error(m.DropStatsTask(1))
_, ok := m.tasks[1]
_, ok := m.tasks.Get(1)
s.True(ok)
})

@ -276,7 +276,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(nil).Once()

s.NoError(m.DropStatsTask(1))
_, ok := m.tasks[1]
_, ok := m.tasks.Get(1)
s.False(ok)

s.NoError(m.DropStatsTask(1000))

@ -277,14 +277,17 @@ func (at *analyzeTask) QueryResult(ctx context.Context, client types.DataNodeCli
// infos length is always one.
for _, result := range resp.GetAnalyzeJobResults().GetResults() {
if result.GetTaskID() == at.GetTaskID() {
log.Ctx(ctx).Info("query analysis task info successfully",
zap.Int64("taskID", at.GetTaskID()), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
if result.GetState() == indexpb.JobState_JobStateFinished || result.GetState() == indexpb.JobState_JobStateFailed ||
result.GetState() == indexpb.JobState_JobStateRetry {
log.Ctx(ctx).Info("query analysis task info successfully",
zap.Int64("taskID", at.GetTaskID()), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
// state is retry or finished or failed
at.setResult(result)
} else if result.GetState() == indexpb.JobState_JobStateNone {
log.Ctx(ctx).Info("query analysis task info successfully",
zap.Int64("taskID", at.GetTaskID()), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
at.SetState(indexpb.JobState_JobStateRetry, "analyze task state is none in info response")
}
// inProgress or unissued/init, keep InProgress state
@ -320,3 +323,7 @@ func (at *analyzeTask) DropTaskOnWorker(ctx context.Context, client types.DataNo
func (at *analyzeTask) SetJobInfo(meta *meta) error {
return meta.analyzeMeta.FinishTask(at.GetTaskID(), at.taskInfo)
}

func (at *analyzeTask) DropTaskMeta(ctx context.Context, meta *meta) error {
return meta.analyzeMeta.DropAnalyzeTask(ctx, at.GetTaskID())
}

@ -317,14 +317,17 @@ func (it *indexBuildTask) QueryResult(ctx context.Context, node types.DataNodeCl
// indexInfos length is always one.
for _, info := range resp.GetIndexJobResults().GetResults() {
if info.GetBuildID() == it.GetTaskID() {
log.Ctx(ctx).Info("query task index info successfully",
zap.Int64("taskID", it.GetTaskID()), zap.String("result state", info.GetState().String()),
zap.String("failReason", info.GetFailReason()))
if info.GetState() == commonpb.IndexState_Finished || info.GetState() == commonpb.IndexState_Failed ||
info.GetState() == commonpb.IndexState_Retry {
log.Ctx(ctx).Info("query task index info successfully",
zap.Int64("taskID", it.GetTaskID()), zap.String("result state", info.GetState().String()),
zap.String("failReason", info.GetFailReason()))
// state is retry or finished or failed
it.setResult(info)
} else if info.GetState() == commonpb.IndexState_IndexStateNone {
log.Ctx(ctx).Info("query task index info successfully",
zap.Int64("taskID", it.GetTaskID()), zap.String("result state", info.GetState().String()),
zap.String("failReason", info.GetFailReason()))
it.SetState(indexpb.JobState_JobStateRetry, "index state is none in info response")
}
// inProgress or unissued, keep InProgress state
@ -358,3 +361,7 @@ func (it *indexBuildTask) DropTaskOnWorker(ctx context.Context, client types.Dat
func (it *indexBuildTask) SetJobInfo(meta *meta) error {
return meta.indexMeta.FinishTask(it.taskInfo)
}

func (it *indexBuildTask) DropTaskMeta(ctx context.Context, meta *meta) error {
return meta.indexMeta.RemoveSegmentIndexByID(ctx, it.taskID)
}

@ -33,6 +33,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type taskScheduler struct {
@ -43,9 +44,8 @@ type taskScheduler struct {
scheduleDuration time.Duration
collectMetricsDuration time.Duration

pendingTasks schedulePolicy
runningTasks map[UniqueID]Task
runningQueueLock sync.RWMutex
pendingTasks schedulePolicy
runningTasks *typeutil.ConcurrentMap[UniqueID, Task]

taskLock *lock.KeyLock[int64]

@ -80,7 +80,7 @@ func newTaskScheduler(
cancel: cancel,
meta: metaTable,
pendingTasks: newFairQueuePolicy(),
runningTasks: make(map[UniqueID]Task),
runningTasks: typeutil.NewConcurrentMap[UniqueID, Task](),
notifyChan: make(chan struct{}, 1),
taskLock: lock.NewKeyLock[int64](),
scheduleDuration: Params.DataCoordCfg.IndexTaskSchedulerInterval.GetAsDuration(time.Millisecond),
@ -125,6 +125,10 @@ func (s *taskScheduler) reloadFromMeta() {
State: segIndex.IndexState,
FailReason: segIndex.FailReason,
},
req: &workerpb.CreateJobRequest{
ClusterID: Params.CommonCfg.ClusterPrefix.GetValue(),
BuildID: segIndex.BuildID,
},
queueTime: time.Now(),
startTime: time.Now(),
endTime: time.Now(),
@ -133,9 +137,7 @@ func (s *taskScheduler) reloadFromMeta() {
case commonpb.IndexState_IndexStateNone, commonpb.IndexState_Unissued:
s.pendingTasks.Push(task)
case commonpb.IndexState_InProgress, commonpb.IndexState_Retry:
s.runningQueueLock.Lock()
s.runningTasks[segIndex.BuildID] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(segIndex.BuildID, task)
}
}
}
@ -150,6 +152,10 @@ func (s *taskScheduler) reloadFromMeta() {
State: t.State,
FailReason: t.FailReason,
},
req: &workerpb.AnalyzeRequest{
ClusterID: Params.CommonCfg.ClusterPrefix.GetValue(),
TaskID: taskID,
},
queueTime: time.Now(),
startTime: time.Now(),
endTime: time.Now(),
@ -158,9 +164,7 @@ func (s *taskScheduler) reloadFromMeta() {
case indexpb.JobState_JobStateNone, indexpb.JobState_JobStateInit:
s.pendingTasks.Push(task)
case indexpb.JobState_JobStateInProgress, indexpb.JobState_JobStateRetry:
s.runningQueueLock.Lock()
s.runningTasks[taskID] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(taskID, task)
}
}

@ -176,6 +180,10 @@ func (s *taskScheduler) reloadFromMeta() {
State: t.GetState(),
FailReason: t.GetFailReason(),
},
req: &workerpb.CreateStatsRequest{
ClusterID: Params.CommonCfg.ClusterPrefix.GetValue(),
TaskID: taskID,
},
queueTime: time.Now(),
startTime: time.Now(),
endTime: time.Now(),
@ -208,9 +216,7 @@ func (s *taskScheduler) reloadFromMeta() {
task.taskInfo.FailReason = "segment is not exist or is l0 compacting"
}
}
s.runningQueueLock.Lock()
s.runningTasks[taskID] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(taskID, task)
}
}
}
@ -228,34 +234,14 @@ func (s *taskScheduler) exist(taskID UniqueID) bool {
if exist {
return true
}

s.runningQueueLock.RLock()
defer s.runningQueueLock.RUnlock()
_, ok := s.runningTasks[taskID]
_, ok := s.runningTasks.Get(taskID)
return ok
}

func (s *taskScheduler) getRunningTask(taskID UniqueID) Task {
s.runningQueueLock.RLock()
defer s.runningQueueLock.RUnlock()

return s.runningTasks[taskID]
}

func (s *taskScheduler) removeRunningTask(taskID UniqueID) {
s.runningQueueLock.Lock()
defer s.runningQueueLock.Unlock()

delete(s.runningTasks, taskID)
}

func (s *taskScheduler) enqueue(task Task) {
defer s.notify()
taskID := task.GetTaskID()

s.runningQueueLock.RLock()
_, ok := s.runningTasks[taskID]
s.runningQueueLock.RUnlock()
_, ok := s.runningTasks.Get(taskID)
if !ok {
s.pendingTasks.Push(task)
task.SetQueueTime(time.Now())
@ -265,25 +251,21 @@ func (s *taskScheduler) enqueue(task Task) {

func (s *taskScheduler) AbortTask(taskID int64) {
log.Ctx(s.ctx).Info("task scheduler receive abort task request", zap.Int64("taskID", taskID))
s.taskLock.Lock(taskID)
defer s.taskLock.Unlock(taskID)

task := s.pendingTasks.Get(taskID)
if task != nil {
s.taskLock.Lock(taskID)
task.SetState(indexpb.JobState_JobStateFailed, "canceled")
s.taskLock.Unlock(taskID)
s.runningTasks.Insert(taskID, task)
s.pendingTasks.Remove(taskID)
return
}

s.runningQueueLock.Lock()
if task != nil {
s.runningTasks[taskID] = task
}
if runningTask, ok := s.runningTasks[taskID]; ok {
s.taskLock.Lock(taskID)
if runningTask, ok := s.runningTasks.Get(taskID); ok {
runningTask.SetState(indexpb.JobState_JobStateFailed, "canceled")
s.taskLock.Unlock(taskID)
s.runningTasks.Insert(taskID, runningTask)
}
s.runningQueueLock.Unlock()
s.pendingTasks.Remove(taskID)
}

func (s *taskScheduler) schedule() {
@ -326,34 +308,29 @@ func (s *taskScheduler) checkProcessingTasksLoop() {
}

func (s *taskScheduler) checkProcessingTasks() {
runningTaskIDs := make([]UniqueID, 0)
s.runningQueueLock.RLock()
for taskID := range s.runningTasks {
runningTaskIDs = append(runningTaskIDs, taskID)
if s.runningTasks.Len() <= 0 {
return
}
s.runningQueueLock.RUnlock()

log.Ctx(s.ctx).Info("check running tasks", zap.Int("runningTask num", len(runningTaskIDs)))
log.Ctx(s.ctx).Info("check running tasks", zap.Int("runningTask num", s.runningTasks.Len()))

allRunningTasks := s.runningTasks.Values()
var wg sync.WaitGroup
sem := make(chan struct{}, 100)
for _, taskID := range runningTaskIDs {
for _, task := range allRunningTasks {
wg.Add(1)
sem <- struct{}{}
taskID := taskID
go func(taskID int64) {
go func(task Task) {
defer wg.Done()
defer func() {
<-sem
}()
task := s.getRunningTask(taskID)
s.taskLock.Lock(taskID)
s.taskLock.Lock(task.GetTaskID())
suc := s.checkProcessingTask(task)
s.taskLock.Unlock(taskID)
s.taskLock.Unlock(task.GetTaskID())
if suc {
s.removeRunningTask(taskID)
s.runningTasks.Remove(task.GetTaskID())
}
}(taskID)
}(task)
}
wg.Wait()
}
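Editor's note: the rewritten checkProcessingTasks keeps a familiar bounded fan-out shape: a WaitGroup for completion plus a buffered channel used as a semaphore so at most 100 checks run concurrently. A self-contained sketch of that shape, with a placeholder check function, looks like this:

package example

import "sync"

// checkAll runs check(t) for every task with at most limit goroutines in
// flight, mirroring the WaitGroup + semaphore-channel pattern above.
func checkAll(tasks []int64, limit int, check func(int64) bool) {
	var wg sync.WaitGroup
	sem := make(chan struct{}, limit)
	for _, t := range tasks {
		wg.Add(1)
		sem <- struct{}{} // blocks once limit goroutines are already running
		go func(t int64) {
			defer wg.Done()
			defer func() { <-sem }()
			check(t)
		}(t)
	}
	wg.Wait()
}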

@ -410,13 +387,13 @@ func (s *taskScheduler) run() {

switch task.GetState() {
case indexpb.JobState_JobStateNone:
return
if !s.processNone(task) {
s.pendingTasks.Push(task)
}
case indexpb.JobState_JobStateInit:
s.pendingTasks.Push(task)
default:
s.runningQueueLock.Lock()
s.runningTasks[task.GetTaskID()] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(task.GetTaskID(), task)
}
}(task, nodeID)
}
@ -433,7 +410,7 @@ func (s *taskScheduler) process(task Task, nodeID int64) bool {

switch task.GetState() {
case indexpb.JobState_JobStateNone:
return true
return s.processNone(task)
case indexpb.JobState_JobStateInit:
return s.processInit(task, nodeID)
default:
@ -505,11 +482,10 @@ func (s *taskScheduler) collectTaskMetrics() {
collectPendingMetricsFunc(taskID)
}

s.runningQueueLock.RLock()
for _, task := range s.runningTasks {
allRunningTasks := s.runningTasks.Values()
for _, task := range allRunningTasks {
collectRunningMetricsFunc(task)
}
s.runningQueueLock.RUnlock()

for taskType, queueingTime := range maxTaskQueueingTime {
metrics.DataCoordTaskExecuteLatency.
@ -577,6 +553,14 @@ func (s *taskScheduler) processInit(task Task, nodeID int64) bool {
return true
}

func (s *taskScheduler) processNone(task Task) bool {
if err := task.DropTaskMeta(s.ctx, s.meta); err != nil {
log.Ctx(s.ctx).Warn("set job info failed", zap.Error(err))
return false
}
return true
}

func (s *taskScheduler) processFinished(task Task) bool {
if err := task.SetJobInfo(s.meta); err != nil {
log.Ctx(s.ctx).Warn("update task info failed", zap.Error(err))

File diff suppressed because it is too large
@ -160,9 +160,10 @@ func (st *statsTask) UpdateMetaBuildingState(meta *meta) error {
}

func (st *statsTask) PreCheck(ctx context.Context, dependency *taskScheduler) bool {
log := log.Ctx(ctx).With(zap.Int64("taskID", st.taskID), zap.Int64("segmentID", st.segmentID))
log := log.Ctx(ctx).With(zap.Int64("taskID", st.taskID), zap.Int64("segmentID", st.segmentID),
zap.Int64("targetSegmentID", st.targetSegmentID))

statsMeta := dependency.meta.statsTaskMeta.GetStatsTaskBySegmentID(st.segmentID, st.subJobType)
statsMeta := dependency.meta.statsTaskMeta.GetStatsTask(st.taskID)
if statsMeta == nil {
log.Warn("stats task meta is null, skip it")
st.SetState(indexpb.JobState_JobStateNone, "stats task meta is null")
@ -241,6 +242,9 @@ func (st *statsTask) PreCheck(ctx context.Context, dependency *taskScheduler) bo
BinlogMaxSize: Params.DataNodeCfg.BinLogMaxSize.GetAsUint64(),
}

log.Info("stats task pre check successfully", zap.String("subJobType", st.subJobType.String()),
zap.Int64("num rows", segment.GetNumOfRows()), zap.Int64("task version", st.req.GetTaskVersion()))

return true
}

@ -297,13 +301,16 @@ func (st *statsTask) QueryResult(ctx context.Context, client types.DataNodeClien

for _, result := range resp.GetStatsJobResults().GetResults() {
if result.GetTaskID() == st.GetTaskID() {
log.Ctx(ctx).Info("query stats task result success", zap.Int64("taskID", st.GetTaskID()),
zap.Int64("segmentID", st.segmentID), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
if result.GetState() == indexpb.JobState_JobStateFinished || result.GetState() == indexpb.JobState_JobStateRetry ||
result.GetState() == indexpb.JobState_JobStateFailed {
log.Ctx(ctx).Info("query stats task result success", zap.Int64("taskID", st.GetTaskID()),
zap.Int64("segmentID", st.segmentID), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
st.setResult(result)
} else if result.GetState() == indexpb.JobState_JobStateNone {
log.Ctx(ctx).Info("query stats task result success", zap.Int64("taskID", st.GetTaskID()),
zap.Int64("segmentID", st.segmentID), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
st.SetState(indexpb.JobState_JobStateRetry, "stats task state is none in info response")
}
// inProgress or unissued/init, keep InProgress state
@ -374,3 +381,12 @@ func (st *statsTask) SetJobInfo(meta *meta) error {
zap.String("subJobType", st.subJobType.String()), zap.String("state", st.taskInfo.GetState().String()))
return nil
}

func (st *statsTask) DropTaskMeta(ctx context.Context, meta *meta) error {
if err := meta.statsTaskMeta.DropStatsTask(st.taskID); err != nil {
log.Ctx(ctx).Warn("drop stats task failed", zap.Int64("taskID", st.taskID), zap.Error(err))
return err
}
log.Ctx(ctx).Info("drop stats task success", zap.Int64("taskID", st.taskID))
return nil
}

@ -19,7 +19,6 @@ package datacoord
import (
"context"
"fmt"
"sync"
"testing"
"time"

@ -35,6 +34,8 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type statsTaskSuite struct {
@ -55,6 +56,24 @@ func (s *statsTaskSuite) SetupSuite() {
s.segID = 1179
s.targetID = 1180

tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
statsTask := &indexpb.StatsTask{
CollectionID: 1,
PartitionID: 2,
SegmentID: s.segID,
InsertChannel: "ch1",
TaskID: s.taskID,
SubJobType: indexpb.StatsSubJob_Sort,
Version: 0,
NodeID: 0,
State: indexpb.JobState_JobStateInit,
FailReason: "",
}
tasks.Insert(s.taskID, statsTask)
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
secondaryIndex.Insert(secondaryKey, statsTask)

s.mt = &meta{
segments: &SegmentsInfo{
segments: map[int64]*SegmentInfo{
@ -109,23 +128,11 @@ func (s *statsTaskSuite) SetupSuite() {
},

statsTaskMeta: &statsTaskMeta{
RWMutex: sync.RWMutex{},
ctx: context.Background(),
catalog: nil,
tasks: map[int64]*indexpb.StatsTask{
s.taskID: {
CollectionID: 1,
PartitionID: 2,
SegmentID: s.segID,
InsertChannel: "ch1",
TaskID: s.taskID,
SubJobType: indexpb.StatsSubJob_Sort,
Version: 0,
NodeID: 0,
State: indexpb.JobState_JobStateInit,
FailReason: "",
},
},
keyLock: lock.NewKeyLock[UniqueID](),
ctx: context.Background(),
catalog: nil,
tasks: tasks,
segmentID2Tasks: secondaryIndex,
},
}
}
@ -595,7 +602,9 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {

s.NoError(st.SetJobInfo(s.mt))
s.NotNil(s.mt.GetHealthySegment(context.TODO(), s.targetID))
s.Equal(indexpb.JobState_JobStateFinished, s.mt.statsTaskMeta.tasks[s.taskID].GetState())
t, ok := s.mt.statsTaskMeta.tasks.Get(s.taskID)
s.True(ok)
s.Equal(indexpb.JobState_JobStateFinished, t.GetState())
s.Equal(datapb.SegmentLevel_L2, s.mt.GetHealthySegment(context.TODO(), s.targetID).GetLevel())
})
})

@ -46,4 +46,5 @@ type Task interface {
SetEndTime(time.Time)
GetEndTime() time.Time
GetTaskType() string
DropTaskMeta(ctx context.Context, meta *meta) error
}

@ -33,7 +33,7 @@ import (

// TaskQueue is a queue used to store tasks.
type TaskQueue interface {
utChan() <-chan int
utChan() <-chan struct{}
utEmpty() bool
utFull() bool
addUnissuedTask(t Task) error
@ -54,12 +54,12 @@ type IndexTaskQueue struct {
// maxTaskNum should keep still
maxTaskNum int64

utBufChan chan int // to block scheduler
utBufChan chan struct{} // to block scheduler

sched *TaskScheduler
}

func (queue *IndexTaskQueue) utChan() <-chan int {
func (queue *IndexTaskQueue) utChan() <-chan struct{} {
return queue.utBufChan
}

@ -79,7 +79,10 @@ func (queue *IndexTaskQueue) addUnissuedTask(t Task) error {
return errors.New("index task queue is full")
}
queue.unissuedTasks.PushBack(t)
queue.utBufChan <- 1
select {
case queue.utBufChan <- struct{}{}:
default:
}
return nil
}
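Editor's note: the utBufChan change above switches the queue's wake-up signal from a `chan int` send, which could block the caller once the buffer filled, to a best-effort `chan struct{}` notification: the select with a default arm drops the signal when the buffer is full instead of blocking. The same idiom in isolation, as a minimal sketch:

package main

// notify delivers at most one pending wake-up; if the buffer is already full,
// the default case drops the new signal instead of blocking the sender.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ch := make(chan struct{}, 1)
	notify(ch) // buffered
	notify(ch) // dropped, does not block
	<-ch       // one wake-up is enough for the consumer
}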

@ -159,7 +162,7 @@ func NewIndexBuildTaskQueue(sched *TaskScheduler) *IndexTaskQueue {
activeTasks: make(map[string]Task),
maxTaskNum: 1024,

utBufChan: make(chan int, 1024),
utBufChan: make(chan struct{}, 1024),
sched: sched,
}
}

@ -309,6 +309,7 @@ var (
}, []string{statusLabelName})

// IndexTaskNum records the number of index tasks of each type.
// Deprecated: please ues TaskNum after v2.5.5.
IndexTaskNum = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: milvusNamespace,
@ -350,7 +351,7 @@ var (
Help: "latency of task execute operation",
Buckets: longTaskBuckets,
}, []string{
taskTypeLabel,
TaskTypeLabel,
statusLabelName,
})

@ -361,7 +362,7 @@ var (
Subsystem: typeutil.DataCoordRole,
Name: "task_count",
Help: "number of index tasks of each type",
}, []string{collectionIDLabelName, taskTypeLabel, taskStateLabel})
}, []string{TaskTypeLabel, TaskStateLabel})
)

// RegisterDataCoord registers DataCoord metrics

@ -136,8 +136,8 @@ const (
LoadedLabel = "loaded"
NumEntitiesAllLabel = "all"

taskTypeLabel = "task_type"
taskStateLabel = "task_state"
TaskTypeLabel = "task_type"
TaskStateLabel = "task_state"
)

var (

@ -435,7 +435,7 @@ var (
Subsystem: typeutil.ProxyRole,
Name: "queue_task_num",
Help: "",
}, []string{nodeIDLabelName, queueTypeLabelName, taskStateLabel})
}, []string{nodeIDLabelName, queueTypeLabelName, TaskStateLabel})

ProxyParseExpressionLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{

@ -132,7 +132,7 @@ var (
Name: "task_latency",
Help: "latency of all kind of task in query coord scheduler scheduler",
Buckets: longTaskBuckets,
}, []string{collectionIDLabelName, taskTypeLabel, channelNameLabelName})
}, []string{collectionIDLabelName, TaskTypeLabel, channelNameLabelName})

QueryCoordResourceGroupInfo = prometheus.NewGaugeVec(
prometheus.GaugeOpts{