Mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-06 17:18:35 +08:00
enhance: Refine task meta with key lock (#40613)

issue: #39101
2.5 pr: #40146 #40353

Signed-off-by: Cai Zhang <cai.zhang@zilliz.com>

This commit is contained in:
parent bf4fc6a8c6
commit 6dbe5d475e
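Editor's note: at a glance, the commit replaces the single indexMeta-wide sync.RWMutex with finer-grained synchronization — a dedicated fieldIndexLock for the collection-index map, a per-buildID lock.KeyLock for task updates, and typeutil.ConcurrentMap for segment/build lookups. A minimal sketch of that pattern, using only the APIs that appear in this diff; the TaskMeta type and taskTable helper are illustrative, not part of the codebase:

```go
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/util/lock"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

// TaskMeta stands in for model.SegmentIndex in this sketch.
type TaskMeta struct {
	BuildID int64
	State   string
}

// taskTable mirrors the refactored shape: a concurrent map for lookups plus a
// per-key lock that serializes read-modify-write cycles on one build ID.
type taskTable struct {
	keyLock *lock.KeyLock[int64]
	tasks   *typeutil.ConcurrentMap[int64, *TaskMeta]
}

// UpdateTask holds only the lock for this buildID, so updates to different
// build IDs no longer contend on a single table-wide mutex.
func (t *taskTable) UpdateTask(buildID int64, state string) {
	t.keyLock.Lock(buildID)
	defer t.keyLock.Unlock(buildID)

	task, ok := t.tasks.Get(buildID)
	if !ok {
		task = &TaskMeta{BuildID: buildID}
	}
	task.State = state
	t.tasks.Insert(buildID, task)
}

func main() {
	table := &taskTable{
		keyLock: lock.NewKeyLock[int64](),
		tasks:   typeutil.NewConcurrentMap[int64, *TaskMeta](),
	}
	table.UpdateTask(1, "InProgress")
	if task, ok := table.tasks.Get(1); ok {
		fmt.Println(task.BuildID, task.State)
	}
}
```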
@@ -169,7 +169,7 @@ func Test_compactionTrigger_force_without_index(t *testing.T) {
},
},
indexMeta: &indexMeta{
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
indexes: map[UniqueID]map[UniqueID]*model.Index{},
},
collections: map[int64]*collectionInfo{
@ -308,98 +308,8 @@ func Test_compactionTrigger_force(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
collectionID UniqueID
|
||||
wantErr bool
|
||||
wantSegIDs []int64
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test force compaction",
|
||||
fields{
|
||||
&meta{
|
||||
catalog: catalog,
|
||||
channelCPs: newChannelCps(),
|
||||
segments: &SegmentsInfo{
|
||||
segments: map[int64]*SegmentInfo{
|
||||
1: seg1,
|
||||
2: seg2,
|
||||
3: seg3,
|
||||
},
|
||||
secondaryIndexes: segmentInfoIndexes{
|
||||
coll2Segments: map[UniqueID]map[UniqueID]*SegmentInfo{
|
||||
2: {
|
||||
seg1.GetID(): seg1,
|
||||
seg2.GetID(): seg2,
|
||||
},
|
||||
1111: {
|
||||
seg3.GetID(): seg3,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: {
|
||||
indexID: {
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
2: {
|
||||
indexID: {
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
3: {
|
||||
indexID: {
|
||||
SegmentID: 3,
|
||||
CollectionID: 1111,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
@ -442,7 +352,98 @@ func Test_compactionTrigger_force(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIdx3 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx3.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
CollectionID: 1111,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
im.segmentIndexes.Insert(1, segIdx1)
|
||||
im.segmentIndexes.Insert(2, segIdx2)
|
||||
im.segmentIndexes.Insert(3, segIdx3)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
collectionID UniqueID
|
||||
wantErr bool
|
||||
wantSegIDs []int64
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test force compaction",
|
||||
fields{
|
||||
&meta{
|
||||
catalog: catalog,
|
||||
channelCPs: newChannelCps(),
|
||||
segments: &SegmentsInfo{
|
||||
segments: map[int64]*SegmentInfo{
|
||||
1: seg1,
|
||||
2: seg2,
|
||||
3: seg3,
|
||||
},
|
||||
secondaryIndexes: segmentInfoIndexes{
|
||||
coll2Segments: map[UniqueID]map[UniqueID]*SegmentInfo{
|
||||
2: {
|
||||
seg1.GetID(): seg1,
|
||||
seg2.GetID(): seg2,
|
||||
},
|
||||
1111: {
|
||||
seg3.GetID(): seg3,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -1200,9 +1201,9 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
|
||||
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
@ -1212,32 +1213,12 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
wantErr bool
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test small segment",
|
||||
fields{
|
||||
&meta{
|
||||
// 8 small segments
|
||||
channelCPs: newChannelCps(),
|
||||
|
||||
segments: mockSegmentsInfo(20, 20, 20, 20, 20, 20),
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: genSegIndex(1, indexID, 20),
|
||||
2: genSegIndex(2, indexID, 20),
|
||||
3: genSegIndex(3, indexID, 20),
|
||||
4: genSegIndex(4, indexID, 20),
|
||||
5: genSegIndex(5, indexID, 20),
|
||||
6: genSegIndex(6, indexID, 20),
|
||||
},
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
@ -1260,7 +1241,28 @@ func Test_compactionTrigger_PrioritizedCandi(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, genSegIndex(1, indexID, 20))
|
||||
im.segmentIndexes.Insert(2, genSegIndex(2, indexID, 20))
|
||||
im.segmentIndexes.Insert(3, genSegIndex(3, indexID, 20))
|
||||
im.segmentIndexes.Insert(4, genSegIndex(4, indexID, 20))
|
||||
im.segmentIndexes.Insert(5, genSegIndex(5, indexID, 20))
|
||||
im.segmentIndexes.Insert(6, genSegIndex(6, indexID, 20))
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
wantErr bool
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test small segment",
|
||||
fields{
|
||||
&meta{
|
||||
// 8 small segments
|
||||
channelCPs: newChannelCps(),
|
||||
|
||||
segments: mockSegmentsInfo(20, 20, 20, 20, 20, 20),
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -1341,9 +1343,9 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
|
||||
vecFieldID := int64(201)
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
@ -1353,34 +1355,11 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test small segment",
|
||||
fields{
|
||||
&meta{
|
||||
channelCPs: newChannelCps(),
|
||||
// 7 segments with 200MB each, the compaction is expected to be triggered
|
||||
// as the first 5 being merged, and 1 plus being squeezed.
|
||||
segments: mockSegmentsInfo(200, 200, 200, 200, 200, 200, 200),
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: genSegIndex(1, indexID, 20),
|
||||
2: genSegIndex(2, indexID, 20),
|
||||
3: genSegIndex(3, indexID, 20),
|
||||
4: genSegIndex(4, indexID, 20),
|
||||
5: genSegIndex(5, indexID, 20),
|
||||
6: genSegIndex(6, indexID, 20),
|
||||
7: genSegIndex(7, indexID, 20),
|
||||
},
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
@ -1403,7 +1382,30 @@ func Test_compactionTrigger_SmallCandi(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, genSegIndex(1, indexID, 20))
|
||||
im.segmentIndexes.Insert(2, genSegIndex(2, indexID, 20))
|
||||
im.segmentIndexes.Insert(3, genSegIndex(3, indexID, 20))
|
||||
im.segmentIndexes.Insert(4, genSegIndex(4, indexID, 20))
|
||||
im.segmentIndexes.Insert(5, genSegIndex(5, indexID, 20))
|
||||
im.segmentIndexes.Insert(6, genSegIndex(6, indexID, 20))
|
||||
im.segmentIndexes.Insert(7, genSegIndex(7, indexID, 20))
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test small segment",
|
||||
fields{
|
||||
&meta{
|
||||
channelCPs: newChannelCps(),
|
||||
// 7 segments with 200MB each, the compaction is expected to be triggered
|
||||
// as the first 5 being merged, and 1 plus being squeezed.
|
||||
segments: mockSegmentsInfo(200, 200, 200, 200, 200, 200, 200),
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -1484,9 +1486,9 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
|
||||
}
|
||||
vecFieldID := int64(201)
|
||||
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
genSegIndex := func(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
@ -1496,33 +1498,11 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
}
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test small segment",
|
||||
fields{
|
||||
&meta{
|
||||
channelCPs: newChannelCps(),
|
||||
|
||||
segments: mockSegmentsInfo(600, 600, 600, 600, 260, 260),
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: genSegIndex(1, indexID, 20),
|
||||
2: genSegIndex(2, indexID, 20),
|
||||
3: genSegIndex(3, indexID, 20),
|
||||
4: genSegIndex(4, indexID, 20),
|
||||
5: genSegIndex(5, indexID, 20),
|
||||
6: genSegIndex(6, indexID, 20),
|
||||
},
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
@ -1545,7 +1525,29 @@ func Test_compactionTrigger_SqueezeNonPlannedSegs(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, genSegIndex(1, indexID, 20))
|
||||
im.segmentIndexes.Insert(2, genSegIndex(2, indexID, 20))
|
||||
im.segmentIndexes.Insert(3, genSegIndex(3, indexID, 20))
|
||||
im.segmentIndexes.Insert(4, genSegIndex(4, indexID, 20))
|
||||
im.segmentIndexes.Insert(5, genSegIndex(5, indexID, 20))
|
||||
im.segmentIndexes.Insert(6, genSegIndex(6, indexID, 20))
|
||||
mock0Allocator := newMockAllocator(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
wantPlans []*datapb.CompactionPlan
|
||||
}{
|
||||
{
|
||||
"test small segment",
|
||||
fields{
|
||||
&meta{
|
||||
channelCPs: newChannelCps(),
|
||||
|
||||
segments: mockSegmentsInfo(600, 600, 600, 600, 260, 260),
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
2: {
|
||||
ID: 2,
|
||||
@ -2191,9 +2193,9 @@ func (s *CompactionTriggerSuite) genSeg(segID, numRows int64) *datapb.SegmentInf
|
||||
}
|
||||
}
|
||||
|
||||
func (s *CompactionTriggerSuite) genSegIndex(segID, indexID UniqueID, numRows int64) map[UniqueID]*model.SegmentIndex {
|
||||
return map[UniqueID]*model.SegmentIndex{
|
||||
indexID: {
|
||||
func (s *CompactionTriggerSuite) genSegIndex(segID, indexID UniqueID, numRows int64) *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex] {
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: s.collectionID,
|
||||
PartitionID: s.partitionID,
|
||||
@ -2203,8 +2205,8 @@ func (s *CompactionTriggerSuite) genSegIndex(segID, indexID UniqueID, numRows in
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
}
|
||||
})
|
||||
return segIdx
|
||||
}
|
||||
|
||||
func (s *CompactionTriggerSuite) SetupTest() {
|
||||
@ -2241,6 +2243,37 @@ func (s *CompactionTriggerSuite) SetupTest() {
|
||||
lastFlushTime: time.Now(),
|
||||
}
|
||||
|
||||
im := &indexMeta{
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
s.collectionID: {
|
||||
s.indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: s.collectionID,
|
||||
FieldID: s.vecFieldID,
|
||||
IndexID: s.indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
im.segmentIndexes.Insert(1, s.genSegIndex(1, indexID, 60))
|
||||
im.segmentIndexes.Insert(2, s.genSegIndex(2, indexID, 60))
|
||||
im.segmentIndexes.Insert(3, s.genSegIndex(3, indexID, 60))
|
||||
im.segmentIndexes.Insert(4, s.genSegIndex(4, indexID, 60))
|
||||
im.segmentIndexes.Insert(5, s.genSegIndex(5, indexID, 60))
|
||||
im.segmentIndexes.Insert(6, s.genSegIndex(6, indexID, 60))
|
||||
s.meta = &meta{
|
||||
channelCPs: newChannelCps(),
|
||||
catalog: catalog,
|
||||
@ -2276,38 +2309,7 @@ func (s *CompactionTriggerSuite) SetupTest() {
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: s.genSegIndex(1, indexID, 60),
|
||||
2: s.genSegIndex(2, indexID, 60),
|
||||
3: s.genSegIndex(3, indexID, 60),
|
||||
4: s.genSegIndex(4, indexID, 60),
|
||||
5: s.genSegIndex(5, indexID, 26),
|
||||
6: s.genSegIndex(6, indexID, 26),
|
||||
},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
s.collectionID: {
|
||||
s.indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: s.collectionID,
|
||||
FieldID: s.vecFieldID,
|
||||
IndexID: s.indexID,
|
||||
IndexName: "_default_idx",
|
||||
IsDeleted: false,
|
||||
CreateTime: 0,
|
||||
TypeParams: nil,
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: common.IndexTypeKey,
|
||||
Value: "HNSW",
|
||||
},
|
||||
},
|
||||
IsAutoIndex: false,
|
||||
UserIndexParams: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexMeta: im,
|
||||
collections: map[int64]*collectionInfo{
|
||||
s.collectionID: {
|
||||
ID: s.collectionID,
|
||||
@ -2731,6 +2733,45 @@ func Test_compactionTrigger_generatePlans(t *testing.T) {
|
||||
compactTime *compactTime
|
||||
expectedSize int64
|
||||
}
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(1, segIdx0)
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
})
|
||||
segIndexes.Insert(2, segIdx1)
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
@ -2758,46 +2799,7 @@ func Test_compactionTrigger_generatePlans(t *testing.T) {
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1: {
|
||||
indexID: {
|
||||
SegmentID: 1,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 1,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
2: {
|
||||
indexID: {
|
||||
SegmentID: 2,
|
||||
CollectionID: 2,
|
||||
PartitionID: 1,
|
||||
NumRows: 100,
|
||||
IndexID: indexID,
|
||||
BuildID: 2,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
2: {
|
||||
indexID: {
|
||||
|
||||
@ -54,6 +54,7 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/lock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
func Test_garbageCollector_basic(t *testing.T) {
|
||||
@ -476,17 +477,9 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
|
||||
},
|
||||
},
|
||||
}
|
||||
meta := &meta{
|
||||
RWMutex: lock.RWMutex{},
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
collections: nil,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -502,10 +495,9 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -521,9 +513,18 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
meta := &meta{
|
||||
RWMutex: lock.RWMutex{},
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
collections: nil,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{},
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
},
|
||||
@ -641,17 +642,9 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
|
||||
},
|
||||
},
|
||||
}
|
||||
meta := &meta{
|
||||
RWMutex: lock.RWMutex{},
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
collections: nil,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -667,10 +660,9 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -686,9 +678,18 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
meta := &meta{
|
||||
RWMutex: lock.RWMutex{},
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
collections: nil,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -1043,15 +1044,9 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
m := &meta{
|
||||
catalog: catalog,
|
||||
channelCPs: channelCPs,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1067,10 +1062,9 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
||||
IndexFileKeys: []string{"file1", "file2"},
|
||||
IndexSerializedSize: 1024,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1086,9 +1080,17 @@ func TestGarbageCollector_clearETCD(t *testing.T) {
|
||||
IndexFileKeys: []string{"file3", "file4"},
|
||||
IndexSerializedSize: 1024,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
m := &meta{
|
||||
catalog: catalog,
|
||||
channelCPs: channelCPs,
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
catalog: catalog,
|
||||
segmentIndexes: segIndexes,
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
|
||||
@@ -44,6 +44,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
"github.com/milvus-io/milvus/pkg/v2/util/indexparams"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
@@ -52,19 +53,21 @@ import (
)

type indexMeta struct {
sync.RWMutex
ctx context.Context
catalog metastore.DataCoordCatalog

// collectionIndexes records which indexes are on the collection
// collID -> indexID -> index
fieldIndexLock sync.RWMutex
indexes map[UniqueID]map[UniqueID]*model.Index

// buildID2Meta records building index meta information of the segment
segmentBuildInfo *segmentBuildInfo

// buildID -> lock
keyLock *lock.KeyLock[UniqueID]
// segmentID -> indexID -> segmentIndex
segmentIndexes map[UniqueID]map[UniqueID]*model.SegmentIndex
segmentIndexes *typeutil.ConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]
}

func newIndexTaskStats(s *model.SegmentIndex) *metricsinfo.IndexTaskStats {
@@ -86,7 +89,7 @@ func newIndexTaskStats(s *model.SegmentIndex) *metricsinfo.IndexTaskStats {
type segmentBuildInfo struct {
// buildID2Meta records the meta information of the segment
// buildID -> segmentIndex
buildID2SegmentIndex map[UniqueID]*model.SegmentIndex
buildID2SegmentIndex *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]
// taskStats records the task stats of the segment
taskStats *expirable.LRU[UniqueID, *metricsinfo.IndexTaskStats]
}
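The struct swap above turns segmentIndexes into a two-level ConcurrentMap (segmentID -> indexID -> *model.SegmentIndex), so lookups no longer go through the embedded RWMutex, while the collection-index map keeps a plain map guarded by the new fieldIndexLock. A minimal read-path sketch under that layout, written as if inside the datacoord package; the helper name is an assumption:

```go
// lookupSegmentIndex walks the new two-level layout. It only reads, so no
// extra locking is needed beyond what ConcurrentMap provides internally.
func lookupSegmentIndex(
	segmentIndexes *typeutil.ConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]],
	segID, indexID UniqueID,
) (*model.SegmentIndex, bool) {
	// First level: all index tasks that belong to one segment.
	perSegment, ok := segmentIndexes.Get(segID)
	if !ok || perSegment.Len() == 0 {
		return nil, false
	}
	// Second level: the task for one specific index on that segment.
	return perSegment.Get(indexID)
}
```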
@@ -94,28 +97,28 @@ type segmentBuildInfo struct {
func newSegmentIndexBuildInfo() *segmentBuildInfo {
return &segmentBuildInfo{
// build ID -> segment index
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
buildID2SegmentIndex: typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex](),
// build ID -> task stats
taskStats: expirable.NewLRU[UniqueID, *metricsinfo.IndexTaskStats](1024, nil, time.Minute*30),
}
}

func (m *segmentBuildInfo) Add(segIdx *model.SegmentIndex) {
m.buildID2SegmentIndex[segIdx.BuildID] = segIdx
m.buildID2SegmentIndex.Insert(segIdx.BuildID, segIdx)
m.taskStats.Add(segIdx.BuildID, newIndexTaskStats(segIdx))
}

func (m *segmentBuildInfo) Get(key UniqueID) (*model.SegmentIndex, bool) {
value, exists := m.buildID2SegmentIndex[key]
value, exists := m.buildID2SegmentIndex.Get(key)
return value, exists
}

func (m *segmentBuildInfo) Remove(key UniqueID) {
delete(m.buildID2SegmentIndex, key)
m.buildID2SegmentIndex.Remove(key)
}

func (m *segmentBuildInfo) List() map[UniqueID]*model.SegmentIndex {
return m.buildID2SegmentIndex
func (m *segmentBuildInfo) List() []*model.SegmentIndex {
return m.buildID2SegmentIndex.Values()
}

func (m *segmentBuildInfo) GetTaskStats() []*metricsinfo.IndexTaskStats {
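With buildID2SegmentIndex backed by a ConcurrentMap, List() now hands out a value snapshot rather than the internal map. A sketch of how a caller recovers buildID keys from that slice, mirroring the GetAllSegIndexes change further down; the helper name is an assumption:

```go
// buildIDIndex rebuilds a buildID-keyed view from the snapshot slice that the
// new List() returns.
func buildIDIndex(info *segmentBuildInfo) map[UniqueID]*model.SegmentIndex {
	tasks := info.List()
	byBuildID := make(map[UniqueID]*model.SegmentIndex, len(tasks))
	for _, segIdx := range tasks {
		byBuildID[segIdx.BuildID] = segIdx
	}
	return byBuildID
}
```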
@@ -128,8 +131,9 @@ func newIndexMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*ind
ctx: ctx,
catalog: catalog,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
keyLock: lock.NewKeyLock[UniqueID](),
segmentBuildInfo: newSegmentIndexBuildInfo(),
segmentIndexes: make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
}
err := mt.reloadFromKV()
if err != nil {
@@ -174,12 +178,14 @@ func (m *indexMeta) updateCollectionIndex(index *model.Index) {
}

func (m *indexMeta) updateSegmentIndex(segIdx *model.SegmentIndex) {
indexes, ok := m.segmentIndexes[segIdx.SegmentID]
indexes, ok := m.segmentIndexes.Get(segIdx.SegmentID)
if ok {
indexes[segIdx.IndexID] = segIdx
indexes.Insert(segIdx.IndexID, segIdx)
m.segmentIndexes.Insert(segIdx.SegmentID, indexes)
} else {
m.segmentIndexes[segIdx.SegmentID] = make(map[UniqueID]*model.SegmentIndex)
m.segmentIndexes[segIdx.SegmentID][segIdx.IndexID] = segIdx
indexes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
indexes.Insert(segIdx.IndexID, segIdx)
m.segmentIndexes.Insert(segIdx.SegmentID, indexes)
}
m.segmentBuildInfo.Add(segIdx)
}
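The updateSegmentIndex hunk above follows a get-then-insert sequence on the outer map, which ConcurrentMap alone does not make atomic; in this commit, writers such as AddSegmentIndex appear to serialize such updates on the per-buildID key lock. A condensed sketch of the same upsert pattern, written as if inside the datacoord package; the helper name is illustrative:

```go
// upsertSegmentIndex inserts segIdx into the nested segmentID -> indexID map,
// creating the inner map on first use, as updateSegmentIndex does above.
func upsertSegmentIndex(
	segmentIndexes *typeutil.ConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]],
	segIdx *model.SegmentIndex,
) {
	perSegment, ok := segmentIndexes.Get(segIdx.SegmentID)
	if !ok {
		// First index task recorded for this segment.
		perSegment = typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
	}
	perSegment.Insert(segIdx.IndexID, segIdx)
	segmentIndexes.Insert(segIdx.SegmentID, perSegment)
}
```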
@@ -206,36 +212,37 @@ func (m *indexMeta) updateSegIndexMeta(segIdx *model.SegmentIndex, updateFunc fu
}

func (m *indexMeta) updateIndexTasksMetrics() {
m.RLock()
defer m.RUnlock()

taskMetrics := make(map[UniqueID]map[commonpb.IndexState]int)
taskMetrics := make(map[indexpb.JobState]int)
taskMetrics[indexpb.JobState_JobStateNone] = 0
taskMetrics[indexpb.JobState_JobStateInit] = 0
taskMetrics[indexpb.JobState_JobStateInProgress] = 0
taskMetrics[indexpb.JobState_JobStateFinished] = 0
taskMetrics[indexpb.JobState_JobStateFailed] = 0
taskMetrics[indexpb.JobState_JobStateRetry] = 0
for _, segIdx := range m.segmentBuildInfo.List() {
if segIdx.IsDeleted || !m.isIndexExist(segIdx.CollectionID, segIdx.IndexID) {
if segIdx.IsDeleted || !m.IsIndexExist(segIdx.CollectionID, segIdx.IndexID) {
continue
}
if _, ok := taskMetrics[segIdx.CollectionID]; !ok {
taskMetrics[segIdx.CollectionID] = make(map[commonpb.IndexState]int)
taskMetrics[segIdx.CollectionID][commonpb.IndexState_Unissued] = 0
taskMetrics[segIdx.CollectionID][commonpb.IndexState_InProgress] = 0
taskMetrics[segIdx.CollectionID][commonpb.IndexState_Finished] = 0
taskMetrics[segIdx.CollectionID][commonpb.IndexState_Failed] = 0
}
taskMetrics[segIdx.CollectionID][segIdx.IndexState]++
}
for collID, m := range taskMetrics {
for k, v := range m {
switch k {

switch segIdx.IndexState {
case commonpb.IndexState_IndexStateNone:
taskMetrics[indexpb.JobState_JobStateNone]++
case commonpb.IndexState_Unissued:
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.UnissuedIndexTaskLabel).Set(float64(v))
taskMetrics[indexpb.JobState_JobStateInit]++
case commonpb.IndexState_InProgress:
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.InProgressIndexTaskLabel).Set(float64(v))
taskMetrics[indexpb.JobState_JobStateInProgress]++
case commonpb.IndexState_Finished:
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.FinishedIndexTaskLabel).Set(float64(v))
taskMetrics[indexpb.JobState_JobStateFinished]++
case commonpb.IndexState_Failed:
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.FailedIndexTaskLabel).Set(float64(v))
taskMetrics[indexpb.JobState_JobStateFailed]++
case commonpb.IndexState_Retry:
taskMetrics[indexpb.JobState_JobStateRetry]++
}
}

jobType := indexpb.JobType_JobTypeIndexJob.String()
for k, v := range taskMetrics {
metrics.TaskNum.WithLabelValues(jobType, k.String()).Set(float64(v))
}
log.Ctx(m.ctx).Info("update index metric", zap.Int("collectionNum", len(taskMetrics)))
}
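The metric rework above moves from per-collection IndexTaskNum gauges to a single TaskNum gauge keyed by job state. The state folding it performs is equivalent to this helper, written as if inside the datacoord package; the function itself is not part of the diff:

```go
// toJobState folds a segment index state into the job state used as the
// TaskNum metric label, matching the switch in updateIndexTasksMetrics.
func toJobState(state commonpb.IndexState) indexpb.JobState {
	switch state {
	case commonpb.IndexState_Unissued:
		return indexpb.JobState_JobStateInit
	case commonpb.IndexState_InProgress:
		return indexpb.JobState_JobStateInProgress
	case commonpb.IndexState_Finished:
		return indexpb.JobState_JobStateFinished
	case commonpb.IndexState_Failed:
		return indexpb.JobState_JobStateFailed
	case commonpb.IndexState_Retry:
		return indexpb.JobState_JobStateRetry
	default:
		return indexpb.JobState_JobStateNone
	}
}
```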
@@ -345,9 +352,10 @@ func checkParams(fieldIndex *model.Index, req *indexpb.CreateIndexRequest) bool
return !notEq
}

// CanCreateIndex currently is used in Unittest
func (m *indexMeta) CanCreateIndex(req *indexpb.CreateIndexRequest, isJson bool) (UniqueID, error) {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

indexes, ok := m.indexes[req.CollectionID]
if !ok {
@@ -395,8 +403,8 @@ func (m *indexMeta) CanCreateIndex(req *indexpb.CreateIndexRequest, isJson bool)

// HasSameReq determine whether there are same indexing tasks.
func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

for _, fieldIndex := range m.indexes[req.CollectionID] {
if fieldIndex.IsDeleted {
@@ -420,8 +428,8 @@ func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID)
func (m *indexMeta) CreateIndex(ctx context.Context, index *model.Index) error {
log.Ctx(ctx).Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID),
zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName))
m.Lock()
defer m.Unlock()
m.fieldIndexLock.Lock()
defer m.fieldIndexLock.Unlock()

if err := m.catalog.CreateIndex(ctx, index); err != nil {
log.Ctx(ctx).Error("meta update: CreateIndex save meta fail", zap.Int64("collectionID", index.CollectionID),
@@ -437,8 +445,8 @@ func (m *indexMeta) CreateIndex(ctx context.Context, index *model.Index) error {
}

func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) error {
m.Lock()
defer m.Unlock()
m.fieldIndexLock.Lock()
defer m.fieldIndexLock.Unlock()

err := m.catalog.AlterIndexes(ctx, indexes)
if err != nil {
@@ -454,10 +462,11 @@ func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) err

// AddSegmentIndex adds the index meta corresponding the indexBuildID to meta table.
func (m *indexMeta) AddSegmentIndex(ctx context.Context, segIndex *model.SegmentIndex) error {
m.Lock()
defer m.Unlock()

buildID := segIndex.BuildID

m.keyLock.Lock(buildID)
defer m.keyLock.Unlock(buildID)

log.Ctx(ctx).Info("meta update: adding segment index", zap.Int64("collectionID", segIndex.CollectionID),
zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID),
zap.Int64("buildID", buildID))
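AddSegmentIndex above (and, further down, UpdateVersion, FinishTask, DeleteTask and BuildIndex) now locks on the build ID instead of the whole meta table. A self-contained sketch of what that granularity buys; the worker body is illustrative:

```go
package main

import (
	"sync"

	"github.com/milvus-io/milvus/pkg/v2/util/lock"
)

func main() {
	keyLock := lock.NewKeyLock[int64]()
	var wg sync.WaitGroup
	// Goroutines touching different build IDs never block each other; the two
	// updates to build ID 2 are serialized against one another only.
	for _, buildID := range []int64{1, 2, 2} {
		wg.Add(1)
		go func(id int64) {
			defer wg.Done()
			keyLock.Lock(id) // contends only with the same build ID
			defer keyLock.Unlock(id)
			// ... read-modify-write of that build's task meta ...
		}(buildID)
	}
	wg.Wait()
}
```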
@@ -477,8 +486,9 @@ func (m *indexMeta) AddSegmentIndex(ctx context.Context, segIndex *model.Segment
}

func (m *indexMeta) GetIndexIDByName(collID int64, indexName string) map[int64]uint64 {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

indexID2CreateTs := make(map[int64]uint64)

fieldIndexes, ok := m.indexes[collID]
@@ -495,21 +505,22 @@ func (m *indexMeta) GetIndexIDByName(collID int64, indexName string) map[int64]u
}

func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID) *indexpb.SegmentIndexState {
m.RLock()
defer m.RUnlock()

state := &indexpb.SegmentIndexState{
SegmentID: segmentID,
State: commonpb.IndexState_IndexStateNone,
FailReason: "",
}

m.fieldIndexLock.RLock()
fieldIndexes, ok := m.indexes[collID]
if !ok {
state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID)
m.fieldIndexLock.RUnlock()
return state
}
m.fieldIndexLock.RUnlock()

indexes, ok := m.segmentIndexes[segmentID]
indexes, ok := m.segmentIndexes.Get(segmentID)
if !ok {
state.State = commonpb.IndexState_Unissued
state.FailReason = fmt.Sprintf("segment index not exist with ID: %d", segmentID)
@@ -517,7 +528,7 @@ func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID Uni
}

if index, ok := fieldIndexes[indexID]; ok && !index.IsDeleted {
if segIdx, ok := indexes[indexID]; ok {
if segIdx, ok := indexes.Get(indexID); ok {
state.IndexName = index.IndexName
state.State = segIdx.IndexState
state.FailReason = segIdx.FailReason
@@ -532,24 +543,24 @@ func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID Uni
}

func (m *indexMeta) GetIndexedSegments(collectionID int64, segmentIDs, fieldIDs []UniqueID) []int64 {
m.RLock()
defer m.RUnlock()

m.fieldIndexLock.RLock()
fieldIndexes, ok := m.indexes[collectionID]
if !ok {
m.fieldIndexLock.RUnlock()
return nil
}
m.fieldIndexLock.RUnlock()

fieldIDSet := typeutil.NewUniqueSet(fieldIDs...)

checkSegmentState := func(indexes map[int64]*model.SegmentIndex) bool {
checkSegmentState := func(indexes *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]) bool {
indexedFields := 0
for indexID, index := range fieldIndexes {
if !fieldIDSet.Contain(index.FieldID) || index.IsDeleted {
continue
}

if segIdx, ok := indexes[indexID]; ok && segIdx.IndexState == commonpb.IndexState_Finished {
if segIdx, ok := indexes.Get(indexID); ok && segIdx.IndexState == commonpb.IndexState_Finished {
indexedFields += 1
}
}
@@ -559,7 +570,7 @@ func (m *indexMeta) GetIndexedSegments(collectionID int64, segmentIDs, fieldIDs

ret := make([]int64, 0)
for _, sid := range segmentIDs {
if indexes, ok := m.segmentIndexes[sid]; ok {
if indexes, ok := m.segmentIndexes.Get(sid); ok {
if checkSegmentState(indexes) {
ret = append(ret, sid)
}
@@ -571,8 +582,8 @@ func (m *indexMeta) GetIndexedSegments(collectionID int64, segmentIDs, fieldIDs

// GetIndexesForCollection gets all indexes info with the specified collection.
func (m *indexMeta) GetIndexesForCollection(collID UniqueID, indexName string) []*model.Index {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

indexInfos := make([]*model.Index, 0)
for _, index := range m.indexes[collID] {
@@ -587,8 +598,8 @@ func (m *indexMeta) GetIndexesForCollection(collID UniqueID, indexName string) [
}

func (m *indexMeta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) []*model.Index {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

indexInfos := make([]*model.Index, 0)
for _, index := range m.indexes[collID] {
@@ -607,8 +618,8 @@ func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, ind
log.Ctx(ctx).Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID),
zap.Int64s("indexIDs", indexIDs))

m.Lock()
defer m.Unlock()
m.fieldIndexLock.Lock()
defer m.fieldIndexLock.Unlock()

fieldIndexes, ok := m.indexes[collID]
if !ok {
@@ -641,23 +652,23 @@ func (m *indexMeta) MarkIndexAsDeleted(ctx context.Context, collID UniqueID, ind
}

func (m *indexMeta) IsUnIndexedSegment(collectionID UniqueID, segID UniqueID) bool {
m.RLock()
defer m.RUnlock()

m.fieldIndexLock.RLock()
fieldIndexes, ok := m.indexes[collectionID]
if !ok {
m.fieldIndexLock.RUnlock()
return false
}
m.fieldIndexLock.RUnlock()

// the segment should be unindexed status if the fieldIndexes is not nil
segIndexInfos, ok := m.segmentIndexes[segID]
if !ok || len(segIndexInfos) == 0 {
segIndexInfos, ok := m.segmentIndexes.Get(segID)
if !ok || segIndexInfos.Len() == 0 {
return true
}

for _, index := range fieldIndexes {
if _, ok := segIndexInfos[index.IndexID]; !index.IsDeleted {
if !ok {
if !index.IsDeleted {
if _, ok := segIndexInfos.Get(index.IndexID); !ok {
// the segment should be unindexed status if the segment index is not found within field indexes
return true
}
@@ -668,8 +679,6 @@ func (m *indexMeta) IsUnIndexedSegment(collectionID UniqueID, segID UniqueID) bo
}

func (m *indexMeta) GetSegmentsIndexes(collectionID UniqueID, segIDs []UniqueID) map[int64]map[UniqueID]*model.SegmentIndex {
m.RLock()
defer m.RUnlock()
segmentsIndexes := make(map[int64]map[UniqueID]*model.SegmentIndex)
for _, segmentID := range segIDs {
segmentsIndexes[segmentID] = m.getSegmentIndexes(collectionID, segmentID)
@@ -678,16 +687,14 @@ func (m *indexMeta) GetSegmentsIndexes(collectionID UniqueID, segIDs []UniqueID)
}

func (m *indexMeta) GetSegmentIndexes(collectionID UniqueID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
m.RLock()
defer m.RUnlock()
return m.getSegmentIndexes(collectionID, segID)
}

// Note: thread-unsafe, don't call it outside indexMeta
func (m *indexMeta) getSegmentIndexes(collectionID UniqueID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
ret := make(map[UniqueID]*model.SegmentIndex, 0)
segIndexInfos, ok := m.segmentIndexes[segID]
if !ok || len(segIndexInfos) == 0 {
segIndexInfos, ok := m.segmentIndexes.Get(segID)
if !ok || segIndexInfos.Len() == 0 {
return ret
}

@@ -696,7 +703,7 @@ func (m *indexMeta) getSegmentIndexes(collectionID UniqueID, segID UniqueID) map
return ret
}

for _, segIdx := range segIndexInfos {
for _, segIdx := range segIndexInfos.Values() {
if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
ret[segIdx.IndexID] = model.CloneSegmentIndex(segIdx)
}
@@ -705,8 +712,8 @@ func (m *indexMeta) getSegmentIndexes(collectionID UniqueID, segID UniqueID) map
}

func (m *indexMeta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

if fieldIndexes, ok := m.indexes[collID]; ok {
if index, ok := fieldIndexes[indexID]; ok {
@@ -717,8 +724,9 @@ func (m *indexMeta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
}

func (m *indexMeta) GetIndexNameByID(collID, indexID UniqueID) string {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

if fieldIndexes, ok := m.indexes[collID]; ok {
if index, ok := fieldIndexes[indexID]; ok {
return index.IndexName
@@ -728,8 +736,8 @@ func (m *indexMeta) GetIndexNameByID(collID, indexID UniqueID) string {
}

func (m *indexMeta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

fieldIndexes, ok := m.indexes[collID]
if !ok {
@@ -749,8 +757,8 @@ func (m *indexMeta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValu
}

func (m *indexMeta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

fieldIndexes, ok := m.indexes[collID]
if !ok {
@@ -770,9 +778,6 @@ func (m *indexMeta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValue
}

func (m *indexMeta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
m.RLock()
defer m.RUnlock()

segIdx, ok := m.segmentBuildInfo.Get(buildID)
if ok {
return model.CloneSegmentIndex(segIdx), true
@@ -782,13 +787,9 @@ func (m *indexMeta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
}

func (m *indexMeta) IsIndexExist(collID, indexID UniqueID) bool {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

return m.isIndexExist(collID, indexID)
}

func (m *indexMeta) isIndexExist(collID, indexID UniqueID) bool {
fieldIndexes, ok := m.indexes[collID]
if !ok {
return false
@@ -802,8 +803,8 @@ func (m *indexMeta) isIndexExist(collID, indexID UniqueID) bool {

// UpdateVersion updates the version and nodeID of the index meta, whenever the task is built once, the version will be updated once.
func (m *indexMeta) UpdateVersion(buildID, nodeID UniqueID) error {
m.Lock()
defer m.Unlock()
m.keyLock.Lock(buildID)
defer m.keyLock.Unlock(buildID)

log.Ctx(m.ctx).Info("IndexCoord metaTable UpdateVersion receive", zap.Int64("buildID", buildID), zap.Int64("nodeID", nodeID))
segIdx, ok := m.segmentBuildInfo.Get(buildID)
@@ -821,8 +822,8 @@ func (m *indexMeta) UpdateVersion(buildID, nodeID UniqueID) error {
}

func (m *indexMeta) FinishTask(taskInfo *workerpb.IndexTaskInfo) error {
m.Lock()
defer m.Unlock()
m.keyLock.Lock(taskInfo.GetBuildID())
defer m.keyLock.Unlock(taskInfo.GetBuildID())

segIdx, ok := m.segmentBuildInfo.Get(taskInfo.GetBuildID())
if !ok {
@@ -854,8 +855,8 @@ func (m *indexMeta) FinishTask(taskInfo *workerpb.IndexTaskInfo) error {
}

func (m *indexMeta) DeleteTask(buildID int64) error {
m.Lock()
defer m.Unlock()
m.keyLock.Lock(buildID)
defer m.keyLock.Unlock(buildID)

segIdx, ok := m.segmentBuildInfo.Get(buildID)
if !ok {
@@ -878,8 +879,8 @@ func (m *indexMeta) DeleteTask(buildID int64) error {

// BuildIndex set the index state to be InProgress. It means IndexNode is building the index.
func (m *indexMeta) BuildIndex(buildID UniqueID) error {
m.Lock()
defer m.Unlock()
m.keyLock.Lock(buildID)
defer m.keyLock.Unlock(buildID)

segIdx, ok := m.segmentBuildInfo.Get(buildID)
if !ok {
@@ -905,21 +906,18 @@ func (m *indexMeta) BuildIndex(buildID UniqueID) error {
}

func (m *indexMeta) GetAllSegIndexes() map[int64]*model.SegmentIndex {
m.RLock()
defer m.RUnlock()

tasks := m.segmentBuildInfo.List()
segIndexes := make(map[int64]*model.SegmentIndex, len(tasks))
for buildID, segIndex := range tasks {
segIndexes[buildID] = segIndex
for _, segIndex := range tasks {
segIndexes[segIndex.BuildID] = segIndex
}
return segIndexes
}

// SetStoredIndexFileSizeMetric returns the total index files size of all segment for each collection.
func (m *indexMeta) SetStoredIndexFileSizeMetric(collections map[UniqueID]*collectionInfo) uint64 {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.Lock()
defer m.fieldIndexLock.Unlock()

var total uint64
metrics.DataCoordStoredIndexFilesSize.Reset()
@@ -935,30 +933,41 @@ func (m *indexMeta) SetStoredIndexFileSizeMetric(collections map[UniqueID]*colle
return total
}

func (m *indexMeta) RemoveSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
m.Lock()
defer m.Unlock()

func (m *indexMeta) removeSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
err := m.catalog.DropSegmentIndex(ctx, collID, partID, segID, buildID)
if err != nil {
return err
}

if _, ok := m.segmentIndexes[segID]; ok {
delete(m.segmentIndexes[segID], indexID)
segIndexes, ok := m.segmentIndexes.Get(segID)
if ok {
segIndexes.Remove(indexID)
m.segmentIndexes.Insert(segID, segIndexes)
}

if len(m.segmentIndexes[segID]) == 0 {
delete(m.segmentIndexes, segID)
if segIndexes.Len() == 0 {
m.segmentIndexes.Remove(segID)
}

m.segmentBuildInfo.Remove(buildID)
return nil
}

func (m *indexMeta) RemoveSegmentIndex(ctx context.Context, collID, partID, segID, indexID, buildID UniqueID) error {
return m.removeSegmentIndex(ctx, collID, partID, segID, indexID, buildID)
}

func (m *indexMeta) RemoveSegmentIndexByID(ctx context.Context, buildID UniqueID) error {
segIdx, ok := m.segmentBuildInfo.Get(buildID)
if !ok {
return nil
}

return m.removeSegmentIndex(ctx, segIdx.CollectionID, segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexID, buildID)
}

func (m *indexMeta) GetDeletedIndexes() []*model.Index {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

deletedIndexes := make([]*model.Index, 0)
for _, fieldIndexes := range m.indexes {
@@ -972,8 +981,8 @@ func (m *indexMeta) GetDeletedIndexes() []*model.Index {
}

func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) error {
m.Lock()
defer m.Unlock()
m.fieldIndexLock.Lock()
defer m.fieldIndexLock.Unlock()
log.Ctx(ctx).Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID))
err := m.catalog.DropIndex(ctx, collID, indexID)
if err != nil {
@@ -995,9 +1004,6 @@ func (m *indexMeta) RemoveIndex(ctx context.Context, collID, indexID UniqueID) e
}

func (m *indexMeta) CheckCleanSegmentIndex(buildID UniqueID) (bool, *model.SegmentIndex) {
m.RLock()
defer m.RUnlock()

if segIndex, ok := m.segmentBuildInfo.Get(buildID); ok {
if segIndex.IndexState == commonpb.IndexState_Finished {
return true, model.CloneSegmentIndex(segIndex)
@@ -1008,9 +1014,6 @@ func (m *indexMeta) CheckCleanSegmentIndex(buildID UniqueID) (bool, *model.Segme
}

func (m *indexMeta) getSegmentsIndexStates(collectionID UniqueID, segmentIDs []UniqueID) map[int64]map[int64]*indexpb.SegmentIndexState {
m.RLock()
defer m.RUnlock()

ret := make(map[int64]map[int64]*indexpb.SegmentIndexState, 0)
fieldIndexes, ok := m.indexes[collectionID]
if !ok {
@@ -1019,12 +1022,12 @@ func (m *indexMeta) getSegmentsIndexStates(collectionID UniqueID, segmentIDs []U

for _, segID := range segmentIDs {
ret[segID] = make(map[int64]*indexpb.SegmentIndexState)
segIndexInfos, ok := m.segmentIndexes[segID]
if !ok || len(segIndexInfos) == 0 {
segIndexInfos, ok := m.segmentIndexes.Get(segID)
if !ok || segIndexInfos.Len() == 0 {
continue
}

for _, segIdx := range segIndexInfos {
for _, segIdx := range segIndexInfos.Values() {
if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
ret[segID][segIdx.IndexID] = &indexpb.SegmentIndexState{
SegmentID: segID,
@@ -1077,8 +1080,9 @@ func (m *indexMeta) AreAllDiskIndex(collectionID int64, schema *schemapb.Collect
}

func (m *indexMeta) HasIndex(collectionID int64) bool {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

indexes, ok := m.indexes[collectionID]
if ok {
for _, index := range indexes {
@@ -1100,8 +1104,8 @@ func (m *indexMeta) TaskStatsJSON() string {
}

func (m *indexMeta) GetIndexJSON(collectionID int64) string {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
defer m.fieldIndexLock.RUnlock()

var indexMetrics []*metricsinfo.Index
for collID, indexes := range m.indexes {
@@ -1131,24 +1135,25 @@ func (m *indexMeta) GetIndexJSON(collectionID int64) string {
}

func (m *indexMeta) GetSegmentIndexedFields(collectionID UniqueID, segmentID UniqueID) (bool, []*metricsinfo.IndexedField) {
m.RLock()
defer m.RUnlock()
m.fieldIndexLock.RLock()
fieldIndexes, ok := m.indexes[collectionID]
if !ok {
// the segment should be unindexed status if the collection has no indexes
m.fieldIndexLock.RUnlock()
return false, []*metricsinfo.IndexedField{}
}
m.fieldIndexLock.RUnlock()

// the segment should be unindexed status if the segment indexes is not found
segIndexInfos, ok := m.segmentIndexes[segmentID]
if !ok || len(segIndexInfos) == 0 {
segIndexInfos, ok := m.segmentIndexes.Get(segmentID)
if !ok || segIndexInfos.Len() == 0 {
return false, []*metricsinfo.IndexedField{}
}

isIndexed := true
var segmentIndexes []*metricsinfo.IndexedField
for _, index := range fieldIndexes {
if si, ok := segIndexInfos[index.IndexID]; !index.IsDeleted {
if si, ok := segIndexInfos.Get(index.IndexID); !index.IsDeleted {
buildID := int64(-1)
if !ok {
// the segment should be unindexed status if the segment index is not found within field indexes

@@ -19,7 +19,6 @@ package datacoord

import (
"context"
"sync"
"testing"
"time"

@@ -37,7 +36,9 @@ import (
"github.com/milvus-io/milvus/pkg/v2/common"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

func TestReloadFromKV(t *testing.T) {
@@ -442,12 +443,12 @@ func TestMeta_HasSameReq(t *testing.T) {

func newSegmentIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
return &indexMeta{
RWMutex: sync.RWMutex{},
keyLock: lock.NewKeyLock[UniqueID](),
ctx: context.Background(),
catalog: catalog,
indexes: make(map[UniqueID]map[UniqueID]*model.Index),
segmentBuildInfo: newSegmentIndexBuildInfo(),
segmentIndexes: make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
}
}

@@ -516,9 +517,8 @@ func TestMeta_AddSegmentIndex(t *testing.T) {
).Return(errors.New("fail"))

m := newSegmentIndexMeta(ec)
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
1: make(map[UniqueID]*model.SegmentIndex, 0),
}
m.segmentIndexes = typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
m.segmentIndexes.Insert(1, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())

segmentIndex := &model.SegmentIndex{
SegmentID: 1,
@@ -630,9 +630,8 @@ func TestMeta_GetSegmentIndexState(t *testing.T) {
metakv.EXPECT().LoadWithPrefix(mock.Anything, mock.Anything).Return(nil, nil, nil).Maybe()

m := newSegmentIndexMeta(&datacoord.Catalog{MetaKv: metakv})
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: make(map[UniqueID]*model.SegmentIndex, 0),
}
m.segmentIndexes = typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
m.segmentIndexes.Insert(1, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())

t.Run("collection has no index", func(t *testing.T) {
state := m.GetSegmentIndexState(collID, segID, indexID)
@@ -735,9 +734,8 @@ func TestMeta_GetIndexedSegment(t *testing.T) {
)

m := newSegmentIndexMeta(nil)
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
segID: {
indexID: {
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
segIdxes.Insert(indexID, &model.SegmentIndex{
SegmentID: segID,
CollectionID: collID,
PartitionID: partID,
@@ -752,9 +750,9 @@ func TestMeta_GetIndexedSegment(t *testing.T) {
CreatedUTCTime: 10,
IndexFileKeys: nil,
IndexSerializedSize: 0,
},
},
}
})

m.segmentIndexes.Insert(segID, segIdxes)
m.indexes = map[UniqueID]map[UniqueID]*model.Index{
collID: {
indexID: {
@@ -891,27 +889,17 @@ func TestMeta_GetSegmentIndexes(t *testing.T) {

t.Run("no index exist- field index empty", func(t *testing.T) {
m := newSegmentIndexMeta(nil)
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
1: {
1: &model.SegmentIndex{},
},
}
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
segIdxes.Insert(indexID, &model.SegmentIndex{})
m.segmentIndexes.Insert(segID, segIdxes)

segIndexes := m.GetSegmentIndexes(collID, 1)
|
||||
assert.Equal(t, 0, len(segIndexes))
|
||||
})
|
||||
|
||||
t.Run("index exists", func(t *testing.T) {
|
||||
m := &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: &model.SegmentIndex{
|
||||
CollectionID: collID,
|
||||
SegmentID: segID,
|
||||
IndexID: indexID,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -930,6 +918,14 @@ func TestMeta_GetSegmentIndexes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdxes.Insert(indexID, &model.SegmentIndex{
|
||||
CollectionID: collID,
|
||||
SegmentID: segID,
|
||||
IndexID: indexID,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
})
|
||||
m.segmentIndexes.Insert(segID, segIdxes)
|
||||
segIndexes := m.GetSegmentIndexes(collID, segID)
|
||||
assert.Equal(t, 1, len(segIndexes))
|
||||
|
||||
@ -1200,28 +1196,10 @@ func updateSegmentIndexMeta(t *testing.T) *indexMeta {
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
|
||||
return &indexMeta{
|
||||
m := &indexMeta{
|
||||
catalog: sc,
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -1241,6 +1219,25 @@ func updateSegmentIndexMeta(t *testing.T) *indexMeta {
|
||||
},
|
||||
segmentBuildInfo: indexBuildInfo,
|
||||
}
|
||||
segIdxes := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdxes.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
m.segmentIndexes.Insert(segID, segIdxes)
|
||||
return m
|
||||
}
|
||||
|
||||
func TestMeta_UpdateVersion(t *testing.T) {
|
||||
@ -1346,10 +1343,11 @@ func TestUpdateSegmentIndexNotExists(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
assert.Equal(t, 1, len(m.segmentIndexes))
|
||||
segmentIdx := m.segmentIndexes[1]
|
||||
assert.Equal(t, 1, len(segmentIdx))
|
||||
_, ok := segmentIdx[2]
|
||||
assert.Equal(t, 1, m.segmentIndexes.Len())
|
||||
segmentIdx, ok := m.segmentIndexes.Get(1)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, 1, segmentIdx.Len())
|
||||
_, ok = segmentIdx.Get(2)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
@ -1489,18 +1487,66 @@ func TestRemoveSegmentIndex(t *testing.T) {
|
||||
|
||||
m := &indexMeta{
|
||||
catalog: catalog,
|
||||
segmentIndexes: map[int64]map[int64]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: &model.SegmentIndex{},
|
||||
},
|
||||
},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
}
|
||||
m.segmentIndexes.Insert(segID, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())
|
||||
|
||||
err := m.RemoveSegmentIndex(context.TODO(), collID, partID, segID, indexID, buildID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, len(m.segmentIndexes), 0)
|
||||
assert.Equal(t, 0, m.segmentIndexes.Len())
|
||||
assert.Equal(t, len(m.segmentBuildInfo.List()), 0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemoveSegmentIndexByID(t *testing.T) {
|
||||
t.Run("drop segment index fail", func(t *testing.T) {
|
||||
expectedErr := errors.New("error")
|
||||
catalog := catalogmocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().
|
||||
DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(expectedErr)
|
||||
|
||||
catalog.EXPECT().CreateSegmentIndex(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
m := newSegmentIndexMeta(catalog)
|
||||
err := m.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 1024,
|
||||
IndexID: 1,
|
||||
BuildID: 4,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
err = m.RemoveSegmentIndexByID(context.TODO(), 4)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "error")
|
||||
})
|
||||
|
||||
t.Run("remove segment index ok", func(t *testing.T) {
|
||||
catalog := catalogmocks.NewDataCoordCatalog(t)
|
||||
catalog.EXPECT().
|
||||
DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(nil)
|
||||
|
||||
catalog.EXPECT().CreateSegmentIndex(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
m := newSegmentIndexMeta(catalog)
|
||||
err := m.AddSegmentIndex(context.TODO(), &model.SegmentIndex{
|
||||
SegmentID: 3,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 1024,
|
||||
IndexID: 1,
|
||||
BuildID: 4,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = m.RemoveSegmentIndexByID(context.TODO(), 4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, m.segmentIndexes.Len(), 0)
|
||||
assert.Equal(t, len(m.segmentBuildInfo.List()), 0)
|
||||
})
|
||||
}
|
||||
@ -1639,9 +1685,9 @@ func TestMeta_GetSegmentIndexStatus(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
m.segmentIndexes = map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
m.segmentIndexes = typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1656,10 +1702,8 @@ func TestMeta_GetSegmentIndexStatus(t *testing.T) {
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
segID + 1: {},
|
||||
}
|
||||
})
|
||||
m.segmentIndexes.Insert(segID, segIdx)
|
||||
|
||||
t.Run("index exists", func(t *testing.T) {
|
||||
isIndexed, segmentIndexes := m.GetSegmentIndexedFields(collID, segID)
|
||||
|
||||
@ -47,6 +47,7 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
func TestServerId(t *testing.T) {
|
||||
@ -398,9 +399,10 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -416,8 +418,8 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 1: {
|
||||
})
|
||||
segIdx1.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -433,8 +435,8 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 3: {
|
||||
})
|
||||
segIdx1.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -450,8 +452,8 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 4: {
|
||||
})
|
||||
segIdx1.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -467,8 +469,8 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 5: {
|
||||
})
|
||||
segIdx1.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -484,10 +486,11 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID - 1: {
|
||||
indexID: {
|
||||
})
|
||||
indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -498,9 +501,9 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 1: {
|
||||
SegmentID: segID,
|
||||
})
|
||||
segIdx2.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
@ -510,9 +513,9 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 3: {
|
||||
SegmentID: segID,
|
||||
})
|
||||
segIdx2.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
@ -522,9 +525,9 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 4: {
|
||||
SegmentID: segID,
|
||||
})
|
||||
segIdx2.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
@ -535,9 +538,9 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 5: {
|
||||
SegmentID: segID,
|
||||
})
|
||||
segIdx2.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 10000,
|
||||
@ -547,10 +550,8 @@ func TestServer_AlterIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
indexMeta.segmentIndexes.Insert(segID-1, segIdx2)
|
||||
|
||||
mockHandler := NewNMockHandler(t)
|
||||
|
||||
@ -804,7 +805,7 @@ func TestServer_GetIndexState(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -861,9 +862,12 @@ func TestServer_GetIndexState(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
segments: NewSegmentsInfo(),
|
||||
}
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -879,13 +883,8 @@ func TestServer_GetIndexState(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
}
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx)
|
||||
for id, segment := range segments {
|
||||
s.meta.segments.SetSegment(id, segment)
|
||||
}
|
||||
@ -1404,9 +1403,16 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
},
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1422,8 +1428,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 1: {
|
||||
})
|
||||
segIdx1.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1439,8 +1445,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 3: {
|
||||
})
|
||||
segIdx1.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1456,8 +1462,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 4: {
|
||||
})
|
||||
segIdx1.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1473,8 +1479,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 5: {
|
||||
})
|
||||
segIdx1.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1490,10 +1496,11 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
segID - 1: {
|
||||
indexID: {
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1504,8 +1511,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 1: {
|
||||
})
|
||||
segIdx2.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1516,8 +1523,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 3: {
|
||||
})
|
||||
segIdx2.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1528,8 +1535,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 4: {
|
||||
})
|
||||
segIdx2.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1541,8 +1548,8 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "mock failed",
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
indexID + 5: {
|
||||
})
|
||||
segIdx2.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID - 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1553,16 +1560,9 @@ func TestServer_DescribeIndex(t *testing.T) {
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
CreatedUTCTime: createTS,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID-1, segIdx2)
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
},
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
for id, segment := range segments {
|
||||
s.meta.segments.SetSegment(id, segment)
|
||||
}
|
||||
@ -1720,7 +1720,7 @@ func TestServer_ListIndexes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -1909,9 +1909,16 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
},
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1927,8 +1934,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 1: {
|
||||
})
|
||||
segIdx1.Insert(indexID+1, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1944,8 +1951,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 3: {
|
||||
})
|
||||
segIdx1.Insert(indexID+3, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1961,8 +1968,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 4: {
|
||||
})
|
||||
segIdx1.Insert(indexID+4, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1978,8 +1985,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
indexID + 5: {
|
||||
})
|
||||
segIdx1.Insert(indexID+5, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -1995,16 +2002,8 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
},
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
for id, segment := range segments {
|
||||
s.meta.segments.SetSegment(id, segment)
|
||||
}
|
||||
@ -2156,7 +2155,7 @@ func TestServer_DropIndex(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
@ -2301,9 +2300,17 @@ func TestServer_GetIndexInfos(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
chunkManager: cli,
|
||||
},
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
@ -2319,17 +2326,8 @@ func TestServer_GetIndexInfos(t *testing.T) {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
WriteHandoff: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
segments: NewSegmentsInfo(),
|
||||
chunkManager: cli,
|
||||
},
|
||||
allocator: mock0Allocator,
|
||||
notifyIndexChan: make(chan UniqueID, 1),
|
||||
}
|
||||
})
|
||||
s.meta.indexMeta.segmentIndexes.Insert(segID, segIdx1)
|
||||
s.meta.segments.SetSegment(segID, &SegmentInfo{
|
||||
SegmentInfo: &datapb.SegmentInfo{
|
||||
ID: segID,
|
||||
@ -2395,7 +2393,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
|
||||
segments: NewSegmentsInfo(),
|
||||
indexMeta: &indexMeta{
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{},
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -2438,7 +2436,7 @@ func TestMeta_GetHasUnindexTaskSegments(t *testing.T) {
|
||||
assert.Equal(t, 1, len(segments))
|
||||
assert.Equal(t, segID, segments[0].ID)
|
||||
|
||||
m.indexMeta.segmentIndexes[segID] = make(map[UniqueID]*model.SegmentIndex)
|
||||
m.indexMeta.segmentIndexes.Insert(segID, typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]())
|
||||
m.indexMeta.updateSegmentIndex(&model.SegmentIndex{
|
||||
CollectionID: collID,
|
||||
SegmentID: segID,
|
||||
|
||||
@ -16,6 +16,8 @@ import (
"github.com/milvus-io/milvus/internal/metastore/mocks"
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type jobManagerSuite struct {
@ -91,7 +93,9 @@ func (s *jobManagerSuite) TestJobManager_triggerStatsTaskLoop() {
statsTaskMeta: &statsTaskMeta{
ctx: ctx,
catalog: catalog,
tasks: make(map[int64]*indexpb.StatsTask),
keyLock: lock.NewKeyLock[UniqueID](),
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
},
}

@ -103,7 +107,7 @@ func (s *jobManagerSuite) TestJobManager_triggerStatsTaskLoop() {
scheduler: &taskScheduler{
allocator: alloc,
pendingTasks: newFairQueuePolicy(),
runningTasks: make(map[UniqueID]Task),
runningTasks: typeutil.NewConcurrentMap[UniqueID, Task](),
meta: mt,
taskStats: expirable.NewLRU[UniqueID, Task](512, nil, time.Minute*5),
},

@ -649,6 +649,25 @@ func TestGetDistJSON(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServer_getSegmentsJSON(t *testing.T) {
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(10, &model.SegmentIndex{
|
||||
SegmentID: 1000,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 10250,
|
||||
IndexID: 10,
|
||||
BuildID: 10000,
|
||||
NodeID: 1,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
segIndexes.Insert(1000, segIdx0)
|
||||
s := &Server{
|
||||
meta: &meta{
|
||||
segments: &SegmentsInfo{
|
||||
@ -664,26 +683,7 @@ func TestServer_getSegmentsJSON(t *testing.T) {
|
||||
},
|
||||
},
|
||||
indexMeta: &indexMeta{
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
1000: {
|
||||
10: &model.SegmentIndex{
|
||||
SegmentID: 1000,
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
NumRows: 10250,
|
||||
IndexID: 10,
|
||||
BuildID: 10000,
|
||||
NodeID: 1,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 12,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
1: {
|
||||
10: &model.Index{
|
||||
|
||||
@ -19,36 +19,40 @@ package datacoord
import (
"context"
"fmt"
"strconv"
"sync"

"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"strconv"

"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/metrics"
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/timerecord"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type statsTaskMeta struct {
sync.RWMutex

ctx context.Context
catalog metastore.DataCoordCatalog

// taskID -> analyzeStats
tasks map[int64]*indexpb.StatsTask
keyLock *lock.KeyLock[UniqueID]
// taskID -> statsTask
tasks *typeutil.ConcurrentMap[UniqueID, *indexpb.StatsTask]

// segmentID + SubJobType -> statsTask
segmentID2Tasks *typeutil.ConcurrentMap[string, *indexpb.StatsTask]
}

func newStatsTaskMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*statsTaskMeta, error) {
stm := &statsTaskMeta{
ctx: ctx,
catalog: catalog,
tasks: make(map[int64]*indexpb.StatsTask),
keyLock: lock.NewKeyLock[UniqueID](),
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
}
if err := stm.reloadFromKV(); err != nil {
return nil, err
@ -56,6 +60,10 @@ func newStatsTaskMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (
return stm, nil
}

func createSecondaryIndexKey(segmentID UniqueID, subJobType string) string {
return strconv.FormatUint(uint64(segmentID), 10) + "-" + subJobType
}

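Editorial sketch (not from the patch): the struct above keeps two indexes over the same tasks, one by taskID and one by the "segmentID-subJobType" composite key produced by createSecondaryIndexKey, so duplicate checks no longer scan every task. The sketch below reduces that bookkeeping to plain maps behind a single mutex; the statsTask type, the add helper, and the Sort sub-job name are illustrative assumptions.

package main

import (
	"fmt"
	"strconv"
	"sync"
)

type statsTask struct {
	TaskID     int64
	SegmentID  int64
	SubJobType string
}

type taskMeta struct {
	mu        sync.Mutex
	byTaskID  map[int64]*statsTask
	bySegment map[string]*statsTask // segmentID + subJobType -> task
}

func secondaryKey(segmentID int64, subJobType string) string {
	return strconv.FormatInt(segmentID, 10) + "-" + subJobType
}

// add keeps both indexes in sync and rejects a duplicate (segment, subJobType) pair.
func (m *taskMeta) add(t *statsTask) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	key := secondaryKey(t.SegmentID, t.SubJobType)
	if old, ok := m.bySegment[key]; ok {
		return fmt.Errorf("duplicate stats task %d for key %s", old.TaskID, key)
	}
	m.byTaskID[t.TaskID] = t
	m.bySegment[key] = t
	return nil
}

func main() {
	m := &taskMeta{byTaskID: map[int64]*statsTask{}, bySegment: map[string]*statsTask{}}
	fmt.Println(m.add(&statsTask{TaskID: 1, SegmentID: 100, SubJobType: "Sort"})) // <nil>
	fmt.Println(m.add(&statsTask{TaskID: 2, SegmentID: 100, SubJobType: "Sort"})) // duplicate
}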
func (stm *statsTaskMeta) reloadFromKV() error {
record := timerecord.NewTimeRecorder("statsTaskMeta-reloadFromKV")
// load stats task
@ -65,7 +73,10 @@ func (stm *statsTaskMeta) reloadFromKV() error {
return err
}
for _, t := range statsTasks {
stm.tasks[t.GetTaskID()] = t
stm.tasks.Insert(t.GetTaskID(), t)

secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Insert(secondaryKey, t)
}

log.Info("statsTaskMeta reloadFromKV done", zap.Duration("duration", record.ElapseSpan()))
@ -73,43 +84,38 @@ func (stm *statsTaskMeta) reloadFromKV() error {
}

func (stm *statsTaskMeta) updateMetrics() {
stm.RLock()
defer stm.RUnlock()

taskMetrics := make(map[UniqueID]map[indexpb.JobState]int)
for _, t := range stm.tasks {
if _, ok := taskMetrics[t.GetCollectionID()]; !ok {
taskMetrics[t.GetCollectionID()] = make(map[indexpb.JobState]int)
taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateNone] = 0
taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateInit] = 0
taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateInProgress] = 0
taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateFinished] = 0
taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateFailed] = 0
taskMetrics[t.GetCollectionID()][indexpb.JobState_JobStateRetry] = 0
}
taskMetrics[t.GetCollectionID()][t.GetState()]++
taskMetrics := make(map[indexpb.JobState]int)
taskMetrics[indexpb.JobState_JobStateNone] = 0
taskMetrics[indexpb.JobState_JobStateInit] = 0
taskMetrics[indexpb.JobState_JobStateInProgress] = 0
taskMetrics[indexpb.JobState_JobStateFinished] = 0
taskMetrics[indexpb.JobState_JobStateFailed] = 0
taskMetrics[indexpb.JobState_JobStateRetry] = 0
allTasks := stm.tasks.Values()
for _, t := range allTasks {
taskMetrics[t.GetState()]++
}

jobType := indexpb.JobType_JobTypeStatsJob.String()
for collID, m := range taskMetrics {
for k, v := range m {
metrics.TaskNum.WithLabelValues(strconv.FormatInt(collID, 10), jobType, k.String()).Set(float64(v))
}
for k, v := range taskMetrics {
metrics.TaskNum.WithLabelValues(jobType, k.String()).Set(float64(v))
}
}

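Editorial sketch (not from the patch): the rewritten updateMetrics pre-seeds every known state to zero so the corresponding gauge is explicitly reset once no task remains in that state, and it only walks the snapshot returned by Values(), so no lock is held while counting. The jobState enum below is a simplified stand-in for indexpb.JobState.

package main

import "fmt"

type jobState int

const (
	stateInit jobState = iota
	stateInProgress
	stateFinished
	stateFailed
)

func main() {
	tasks := []jobState{stateInProgress, stateFinished, stateFinished} // snapshot of task states

	counts := map[jobState]int{
		stateInit:       0,
		stateInProgress: 0,
		stateFinished:   0,
		stateFailed:     0,
	}
	for _, st := range tasks {
		counts[st]++
	}
	for st, n := range counts {
		fmt.Printf("state=%d count=%d\n", st, n) // each count would feed one gauge
	}
}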
func (stm *statsTaskMeta) AddStatsTask(t *indexpb.StatsTask) error {
stm.Lock()
defer stm.Unlock()
taskID := t.GetTaskID()

for _, st := range stm.tasks {
if st.GetTaskID() == t.GetTaskID() || (st.GetSegmentID() == t.GetSegmentID() && st.GetSubJobType() == t.GetSubJobType() && st.GetState() != indexpb.JobState_JobStateFailed) {
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
task, alreadyExist := stm.segmentID2Tasks.Get(secondaryKey)
if alreadyExist {
msg := fmt.Sprintf("stats task already exist in meta of segment %d with subJobType: %s",
t.GetSegmentID(), t.GetSubJobType().String())
log.RatedWarn(10, msg, zap.Int64("taskID", t.GetTaskID()), zap.Int64("exist taskID", st.GetTaskID()))
log.RatedWarn(10, msg, zap.Int64("taskID", t.GetTaskID()), zap.Int64("exist taskID", task.GetTaskID()))
return merr.WrapErrTaskDuplicate(indexpb.JobType_JobTypeStatsJob.String(), msg)
}
}

stm.keyLock.Lock(taskID)
defer stm.keyLock.Unlock(taskID)

log.Info("add stats task", zap.Int64("taskID", t.GetTaskID()), zap.Int64("originSegmentID", t.GetSegmentID()),
zap.Int64("targetSegmentID", t.GetTargetSegmentID()), zap.String("subJobType", t.GetSubJobType().String()))
@ -117,14 +123,15 @@ func (stm *statsTaskMeta) AddStatsTask(t *indexpb.StatsTask) error {

if err := stm.catalog.SaveStatsTask(stm.ctx, t); err != nil {
log.Warn("adding stats task failed",
zap.Int64("taskID", t.GetTaskID()),
zap.Int64("taskID", taskID),
zap.Int64("segmentID", t.GetSegmentID()),
zap.String("subJobType", t.GetSubJobType().String()),
zap.Error(err))
return err
}

stm.tasks[t.GetTaskID()] = t
stm.tasks.Insert(taskID, t)
stm.segmentID2Tasks.Insert(secondaryKey, t)

log.Info("add stats task success", zap.Int64("taskID", t.GetTaskID()), zap.Int64("originSegmentID", t.GetSegmentID()),
zap.Int64("targetSegmentID", t.GetTargetSegmentID()), zap.String("subJobType", t.GetSubJobType().String()))
@ -132,12 +139,12 @@ func (stm *statsTaskMeta) AddStatsTask(t *indexpb.StatsTask) error {
}

func (stm *statsTaskMeta) DropStatsTask(taskID int64) error {
stm.Lock()
defer stm.Unlock()
stm.keyLock.Lock(taskID)
defer stm.keyLock.Unlock(taskID)

log.Info("drop stats task by taskID", zap.Int64("taskID", taskID))

t, ok := stm.tasks[taskID]
t, ok := stm.tasks.Get(taskID)
if !ok {
log.Info("remove stats task success, task already not exist", zap.Int64("taskID", taskID))
return nil
@ -150,17 +157,19 @@ func (stm *statsTaskMeta) DropStatsTask(taskID int64) error {
return err
}

delete(stm.tasks, taskID)
stm.tasks.Remove(taskID)
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Remove(secondaryKey)

log.Info("remove stats task success", zap.Int64("taskID", taskID), zap.Int64("segmentID", t.SegmentID))
log.Info("remove stats task success", zap.Int64("taskID", taskID))
return nil
}

func (stm *statsTaskMeta) UpdateVersion(taskID, nodeID int64) error {
stm.Lock()
defer stm.Unlock()
stm.keyLock.Lock(taskID)
defer stm.keyLock.Unlock(taskID)

t, ok := stm.tasks[taskID]
t, ok := stm.tasks.Get(taskID)
if !ok {
return fmt.Errorf("task %d not found", taskID)
}
@ -178,17 +187,19 @@ func (stm *statsTaskMeta) UpdateVersion(taskID, nodeID int64) error {
return err
}

stm.tasks[t.TaskID] = cloneT
stm.tasks.Insert(taskID, cloneT)
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Insert(secondaryKey, cloneT)
log.Info("update stats task version success", zap.Int64("taskID", taskID), zap.Int64("nodeID", nodeID),
zap.Int64("newVersion", cloneT.GetVersion()))
return nil
}

func (stm *statsTaskMeta) UpdateBuildingTask(taskID int64) error {
stm.Lock()
defer stm.Unlock()
stm.keyLock.Lock(taskID)
defer stm.keyLock.Unlock(taskID)

t, ok := stm.tasks[taskID]
t, ok := stm.tasks.Get(taskID)
if !ok {
return fmt.Errorf("task %d not found", taskID)
}
@ -204,17 +215,19 @@ func (stm *statsTaskMeta) UpdateBuildingTask(taskID int64) error {
return err
}

stm.tasks[t.TaskID] = cloneT
stm.tasks.Insert(taskID, cloneT)
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Insert(secondaryKey, cloneT)

log.Info("update building stats task success", zap.Int64("taskID", taskID))
return nil
}

func (stm *statsTaskMeta) FinishTask(taskID int64, result *workerpb.StatsResult) error {
stm.Lock()
defer stm.Unlock()
stm.keyLock.Lock(taskID)
defer stm.keyLock.Unlock(taskID)

t, ok := stm.tasks[taskID]
t, ok := stm.tasks.Get(taskID)
if !ok {
return fmt.Errorf("task %d not found", taskID)
}
@ -231,78 +244,71 @@ func (stm *statsTaskMeta) FinishTask(taskID int64, result *workerpb.StatsResult)
return err
}

stm.tasks[t.TaskID] = cloneT
stm.tasks.Insert(taskID, cloneT)
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Insert(secondaryKey, cloneT)

log.Info("finish stats task meta success", zap.Int64("taskID", taskID), zap.Int64("segmentID", t.SegmentID),
zap.String("state", result.GetState().String()), zap.String("failReason", t.GetFailReason()))
return nil
}

func (stm *statsTaskMeta) GetStatsTaskState(taskID int64) indexpb.JobState {
stm.RLock()
defer stm.RUnlock()
func (stm *statsTaskMeta) GetStatsTask(taskID int64) *indexpb.StatsTask {
t, _ := stm.tasks.Get(taskID)
return t
}

t, ok := stm.tasks[taskID]
func (stm *statsTaskMeta) GetStatsTaskState(taskID int64) indexpb.JobState {
t, ok := stm.tasks.Get(taskID)
if !ok {
return indexpb.JobState_JobStateNone
}
return t.GetState()
}

func (stm *statsTaskMeta) GetStatsTaskStateBySegmentID(segmentID int64, jobType indexpb.StatsSubJob) indexpb.JobState {
stm.RLock()
defer stm.RUnlock()
func (stm *statsTaskMeta) GetStatsTaskStateBySegmentID(segmentID int64, subJobType indexpb.StatsSubJob) indexpb.JobState {
state := indexpb.JobState_JobStateNone

for _, t := range stm.tasks {
if segmentID == t.GetSegmentID() && jobType == t.GetSubJobType() {
return t.GetState()
secondaryKey := createSecondaryIndexKey(segmentID, subJobType.String())
t, exists := stm.segmentID2Tasks.Get(secondaryKey)
if exists {
state = t.GetState()
}
}

return indexpb.JobState_JobStateNone
return state
}

func (stm *statsTaskMeta) CanCleanedTasks() []int64 {
stm.RLock()
defer stm.RUnlock()

needCleanedTaskIDs := make([]int64, 0)
for taskID, t := range stm.tasks {
if t.GetCanRecycle() && (t.GetState() == indexpb.JobState_JobStateFinished ||
t.GetState() == indexpb.JobState_JobStateFailed) {
needCleanedTaskIDs = append(needCleanedTaskIDs, taskID)
}
stm.tasks.Range(func(key UniqueID, value *indexpb.StatsTask) bool {
if value.GetCanRecycle() && (value.GetState() == indexpb.JobState_JobStateFinished ||
value.GetState() == indexpb.JobState_JobStateFailed) {
needCleanedTaskIDs = append(needCleanedTaskIDs, key)
}
return true
})
return needCleanedTaskIDs
}

func (stm *statsTaskMeta) GetAllTasks() map[int64]*indexpb.StatsTask {
tasks := make(map[int64]*indexpb.StatsTask)

stm.RLock()
defer stm.RUnlock()
for k, v := range stm.tasks {
tasks[k] = proto.Clone(v).(*indexpb.StatsTask)
allTasks := stm.tasks.Values()
for _, v := range allTasks {
tasks[v.GetTaskID()] = proto.Clone(v).(*indexpb.StatsTask)
}
return tasks
}

func (stm *statsTaskMeta) GetStatsTaskBySegmentID(segmentID int64, subJobType indexpb.StatsSubJob) *indexpb.StatsTask {
stm.RLock()
defer stm.RUnlock()

log.Info("get stats task by segmentID", zap.Int64("segmentID", segmentID),
zap.String("subJobType", subJobType.String()))

for taskID, t := range stm.tasks {
if t.GetSegmentID() == segmentID && t.GetSubJobType() == subJobType {
secondaryKey := createSecondaryIndexKey(segmentID, subJobType.String())
t, exists := stm.segmentID2Tasks.Get(secondaryKey)
if exists {
log.Info("get stats task by segmentID success",
zap.Int64("taskID", taskID),
zap.Int64("taskID", t.GetTaskID()),
zap.Int64("segmentID", segmentID),
zap.String("subJobType", subJobType.String()))
return t
}
}

log.Info("get stats task by segmentID failed, task not exist", zap.Int64("segmentID", segmentID),
zap.String("subJobType", subJobType.String()))
@ -310,12 +316,9 @@ func (stm *statsTaskMeta) GetStatsTaskBySegmentID(segmentID int64, subJobType in
}

func (stm *statsTaskMeta) MarkTaskCanRecycle(taskID int64) error {
stm.Lock()
defer stm.Unlock()

log.Info("mark stats task can recycle", zap.Int64("taskID", taskID))

t, ok := stm.tasks[taskID]
t, ok := stm.tasks.Get(taskID)
if !ok {
return fmt.Errorf("task %d not found", taskID)
}
@ -331,7 +334,9 @@ func (stm *statsTaskMeta) MarkTaskCanRecycle(taskID int64) error {
return err
}

stm.tasks[t.TaskID] = cloneT
stm.tasks.Insert(taskID, cloneT)
secondaryKey := createSecondaryIndexKey(t.GetSegmentID(), t.GetSubJobType().String())
stm.segmentID2Tasks.Insert(secondaryKey, cloneT)

log.Info("mark stats task can recycle success", zap.Int64("taskID", taskID),
zap.Int64("segmentID", t.SegmentID),

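Editorial sketch (not part of the diff): the methods above replace one struct-wide RWMutex with a per-taskID lock, so catalog writes for different tasks can proceed in parallel while updates to the same task stay serialized. The keyLock type below is a stripped-down illustration of that idea, not the lock.KeyLock implementation from the repo; a production version would also need to release idle entries, which this sketch skips.

package main

import (
	"fmt"
	"sync"
)

type keyLock[K comparable] struct {
	mu    sync.Mutex
	locks map[K]*sync.Mutex
}

func newKeyLock[K comparable]() *keyLock[K] {
	return &keyLock[K]{locks: make(map[K]*sync.Mutex)}
}

func (kl *keyLock[K]) Lock(k K) {
	kl.mu.Lock()
	m, ok := kl.locks[k]
	if !ok {
		m = &sync.Mutex{}
		kl.locks[k] = m
	}
	kl.mu.Unlock()
	m.Lock()
}

func (kl *keyLock[K]) Unlock(k K) {
	kl.mu.Lock()
	m := kl.locks[k]
	kl.mu.Unlock()
	m.Unlock()
}

func main() {
	kl := newKeyLock[int64]()
	var wg sync.WaitGroup
	for _, taskID := range []int64{1, 1, 2} {
		wg.Add(1)
		go func(id int64) {
			defer wg.Done()
			kl.Lock(id)
			defer kl.Unlock(id)
			fmt.Println("updating task", id) // e.g. save to catalog, then update the maps
		}(taskID)
	}
	wg.Wait()
}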
@ -108,7 +108,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
|
||||
|
||||
s.Error(m.AddStatsTask(t))
|
||||
_, ok := m.tasks[1]
|
||||
_, ok := m.tasks.Get(1)
|
||||
s.False(ok)
|
||||
})
|
||||
|
||||
@ -116,13 +116,13 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()
|
||||
|
||||
s.NoError(m.AddStatsTask(t))
|
||||
_, ok := m.tasks[1]
|
||||
_, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
})
|
||||
|
||||
s.Run("already exist", func() {
|
||||
s.Error(m.AddStatsTask(t))
|
||||
_, ok := m.tasks[1]
|
||||
_, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
})
|
||||
})
|
||||
@ -132,13 +132,13 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()
|
||||
|
||||
s.NoError(m.UpdateVersion(1, 1180))
|
||||
task, ok := m.tasks[1]
|
||||
task, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
s.Equal(int64(1), task.GetVersion())
|
||||
})
|
||||
|
||||
s.Run("task not exist", func() {
|
||||
_, ok := m.tasks[100]
|
||||
_, ok := m.tasks.Get(100)
|
||||
s.False(ok)
|
||||
|
||||
s.Error(m.UpdateVersion(100, 1180))
|
||||
@ -148,7 +148,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
|
||||
|
||||
s.Error(m.UpdateVersion(1, 1180))
|
||||
task, ok := m.tasks[1]
|
||||
task, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
// still 1
|
||||
s.Equal(int64(1), task.GetVersion())
|
||||
@ -160,7 +160,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
|
||||
|
||||
s.Error(m.UpdateBuildingTask(1))
|
||||
task, ok := m.tasks[1]
|
||||
task, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateInit, task.GetState())
|
||||
s.Equal(int64(1180), task.GetNodeID())
|
||||
@ -170,14 +170,14 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()
|
||||
|
||||
s.NoError(m.UpdateBuildingTask(1))
|
||||
task, ok := m.tasks[1]
|
||||
task, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateInProgress, task.GetState())
|
||||
s.Equal(int64(1180), task.GetNodeID())
|
||||
})
|
||||
|
||||
s.Run("task not exist", func() {
|
||||
_, ok := m.tasks[100]
|
||||
_, ok := m.tasks.Get(100)
|
||||
s.False(ok)
|
||||
|
||||
s.Error(m.UpdateBuildingTask(100))
|
||||
@ -217,7 +217,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
|
||||
|
||||
s.Error(m.FinishTask(1, result))
|
||||
task, ok := m.tasks[1]
|
||||
task, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateInProgress, task.GetState())
|
||||
})
|
||||
@ -226,7 +226,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil).Once()
|
||||
|
||||
s.NoError(m.FinishTask(1, result))
|
||||
task, ok := m.tasks[1]
|
||||
task, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateFinished, task.GetState())
|
||||
})
|
||||
@ -268,7 +268,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(fmt.Errorf("mock error")).Once()
|
||||
|
||||
s.Error(m.DropStatsTask(1))
|
||||
_, ok := m.tasks[1]
|
||||
_, ok := m.tasks.Get(1)
|
||||
s.True(ok)
|
||||
})
|
||||
|
||||
@ -276,7 +276,7 @@ func (s *statsTaskMetaSuite) Test_Method() {
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(nil).Once()
|
||||
|
||||
s.NoError(m.DropStatsTask(1))
|
||||
_, ok := m.tasks[1]
|
||||
_, ok := m.tasks.Get(1)
|
||||
s.False(ok)
|
||||
|
||||
s.NoError(m.DropStatsTask(1000))
|
||||
|
||||
@ -277,14 +277,17 @@ func (at *analyzeTask) QueryResult(ctx context.Context, client types.DataNodeCli
// infos length is always one.
for _, result := range resp.GetAnalyzeJobResults().GetResults() {
if result.GetTaskID() == at.GetTaskID() {
if result.GetState() == indexpb.JobState_JobStateFinished || result.GetState() == indexpb.JobState_JobStateFailed ||
result.GetState() == indexpb.JobState_JobStateRetry {
log.Ctx(ctx).Info("query analysis task info successfully",
zap.Int64("taskID", at.GetTaskID()), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
if result.GetState() == indexpb.JobState_JobStateFinished || result.GetState() == indexpb.JobState_JobStateFailed ||
result.GetState() == indexpb.JobState_JobStateRetry {
// state is retry or finished or failed
at.setResult(result)
} else if result.GetState() == indexpb.JobState_JobStateNone {
log.Ctx(ctx).Info("query analysis task info successfully",
zap.Int64("taskID", at.GetTaskID()), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
at.SetState(indexpb.JobState_JobStateRetry, "analyze task state is none in info response")
}
// inProgress or unissued/init, keep InProgress state
@ -320,3 +323,7 @@ func (at *analyzeTask) DropTaskOnWorker(ctx context.Context, client types.DataNo
func (at *analyzeTask) SetJobInfo(meta *meta) error {
return meta.analyzeMeta.FinishTask(at.GetTaskID(), at.taskInfo)
}

func (at *analyzeTask) DropTaskMeta(ctx context.Context, meta *meta) error {
return meta.analyzeMeta.DropAnalyzeTask(ctx, at.GetTaskID())
}

@ -317,14 +317,17 @@ func (it *indexBuildTask) QueryResult(ctx context.Context, node types.DataNodeCl
// indexInfos length is always one.
for _, info := range resp.GetIndexJobResults().GetResults() {
if info.GetBuildID() == it.GetTaskID() {
if info.GetState() == commonpb.IndexState_Finished || info.GetState() == commonpb.IndexState_Failed ||
info.GetState() == commonpb.IndexState_Retry {
log.Ctx(ctx).Info("query task index info successfully",
zap.Int64("taskID", it.GetTaskID()), zap.String("result state", info.GetState().String()),
zap.String("failReason", info.GetFailReason()))
if info.GetState() == commonpb.IndexState_Finished || info.GetState() == commonpb.IndexState_Failed ||
info.GetState() == commonpb.IndexState_Retry {
// state is retry or finished or failed
it.setResult(info)
} else if info.GetState() == commonpb.IndexState_IndexStateNone {
log.Ctx(ctx).Info("query task index info successfully",
zap.Int64("taskID", it.GetTaskID()), zap.String("result state", info.GetState().String()),
zap.String("failReason", info.GetFailReason()))
it.SetState(indexpb.JobState_JobStateRetry, "index state is none in info response")
}
// inProgress or unissued, keep InProgress state
@ -358,3 +361,7 @@ func (it *indexBuildTask) DropTaskOnWorker(ctx context.Context, client types.Dat
func (it *indexBuildTask) SetJobInfo(meta *meta) error {
return meta.indexMeta.FinishTask(it.taskInfo)
}

func (it *indexBuildTask) DropTaskMeta(ctx context.Context, meta *meta) error {
return meta.indexMeta.RemoveSegmentIndexByID(ctx, it.taskID)
}

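Editorial sketch (not from the patch): both QueryResult hunks above converge on the same state handling: a terminal state records the worker's result, a "none" state means the worker lost the task and the coordinator schedules a retry, and anything else is left in progress. The state names below are simplified stand-ins for the indexpb/commonpb enums.

package main

import "fmt"

type state int

const (
	stateNone state = iota
	stateInProgress
	stateFinished
	stateFailed
	stateRetry
)

func handleResult(st state) string {
	switch st {
	case stateFinished, stateFailed, stateRetry:
		return "set result" // terminal: keep the worker's reported outcome
	case stateNone:
		return "set retry" // worker no longer knows the task: retry it
	default:
		return "keep in progress" // still running or not yet issued
	}
}

func main() {
	for _, st := range []state{stateFinished, stateNone, stateInProgress} {
		fmt.Println(st, "->", handleResult(st))
	}
}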
@ -33,6 +33,7 @@ import (
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

type taskScheduler struct {
@ -44,8 +45,7 @@ type taskScheduler struct {
collectMetricsDuration time.Duration

pendingTasks schedulePolicy
runningTasks map[UniqueID]Task
runningQueueLock sync.RWMutex
runningTasks *typeutil.ConcurrentMap[UniqueID, Task]

taskLock *lock.KeyLock[int64]

@ -80,7 +80,7 @@ func newTaskScheduler(
cancel: cancel,
meta: metaTable,
pendingTasks: newFairQueuePolicy(),
runningTasks: make(map[UniqueID]Task),
runningTasks: typeutil.NewConcurrentMap[UniqueID, Task](),
notifyChan: make(chan struct{}, 1),
taskLock: lock.NewKeyLock[int64](),
scheduleDuration: Params.DataCoordCfg.IndexTaskSchedulerInterval.GetAsDuration(time.Millisecond),
@ -125,6 +125,10 @@ func (s *taskScheduler) reloadFromMeta() {
State: segIndex.IndexState,
FailReason: segIndex.FailReason,
},
req: &workerpb.CreateJobRequest{
ClusterID: Params.CommonCfg.ClusterPrefix.GetValue(),
BuildID: segIndex.BuildID,
},
queueTime: time.Now(),
startTime: time.Now(),
endTime: time.Now(),
@ -133,9 +137,7 @@ func (s *taskScheduler) reloadFromMeta() {
case commonpb.IndexState_IndexStateNone, commonpb.IndexState_Unissued:
s.pendingTasks.Push(task)
case commonpb.IndexState_InProgress, commonpb.IndexState_Retry:
s.runningQueueLock.Lock()
s.runningTasks[segIndex.BuildID] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(segIndex.BuildID, task)
}
}
}
@ -150,6 +152,10 @@ func (s *taskScheduler) reloadFromMeta() {
State: t.State,
FailReason: t.FailReason,
},
req: &workerpb.AnalyzeRequest{
ClusterID: Params.CommonCfg.ClusterPrefix.GetValue(),
TaskID: taskID,
},
queueTime: time.Now(),
startTime: time.Now(),
endTime: time.Now(),
@ -158,9 +164,7 @@ func (s *taskScheduler) reloadFromMeta() {
case indexpb.JobState_JobStateNone, indexpb.JobState_JobStateInit:
s.pendingTasks.Push(task)
case indexpb.JobState_JobStateInProgress, indexpb.JobState_JobStateRetry:
s.runningQueueLock.Lock()
s.runningTasks[taskID] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(taskID, task)
}
}

@ -176,6 +180,10 @@ func (s *taskScheduler) reloadFromMeta() {
State: t.GetState(),
FailReason: t.GetFailReason(),
},
req: &workerpb.CreateStatsRequest{
ClusterID: Params.CommonCfg.ClusterPrefix.GetValue(),
TaskID: taskID,
},
queueTime: time.Now(),
startTime: time.Now(),
endTime: time.Now(),
@ -208,9 +216,7 @@ func (s *taskScheduler) reloadFromMeta() {
task.taskInfo.FailReason = "segment is not exist or is l0 compacting"
}
}
s.runningQueueLock.Lock()
s.runningTasks[taskID] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(taskID, task)
}
}
}
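Editorial sketch (not from the patch): reloadFromMeta above routes each recovered task either into the pending queue or the running map, keyed by its persisted state. The queue and map below are simplified stand-ins for schedulePolicy and typeutil.ConcurrentMap, and the state strings are stand-ins for the proto enums.

package main

import "fmt"

type task struct {
	id    int64
	state string
}

func main() {
	recovered := []task{
		{id: 1, state: "Unissued"},
		{id: 2, state: "InProgress"},
		{id: 3, state: "Retry"},
	}

	pending := make([]task, 0)      // stand-in for pendingTasks (schedulePolicy)
	running := make(map[int64]task) // stand-in for runningTasks (ConcurrentMap)

	for _, t := range recovered {
		switch t.state {
		case "None", "Init", "Unissued":
			pending = append(pending, t) // will be (re)dispatched by the scheduler
		default:
			running[t.id] = t // already on a worker, only needs to be polled
		}
	}
	fmt.Println("pending:", len(pending), "running:", len(running))
}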
@ -228,34 +234,14 @@ func (s *taskScheduler) exist(taskID UniqueID) bool {
|
||||
if exist {
|
||||
return true
|
||||
}
|
||||
|
||||
s.runningQueueLock.RLock()
|
||||
defer s.runningQueueLock.RUnlock()
|
||||
_, ok := s.runningTasks[taskID]
|
||||
_, ok := s.runningTasks.Get(taskID)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (s *taskScheduler) getRunningTask(taskID UniqueID) Task {
|
||||
s.runningQueueLock.RLock()
|
||||
defer s.runningQueueLock.RUnlock()
|
||||
|
||||
return s.runningTasks[taskID]
|
||||
}
|
||||
|
||||
func (s *taskScheduler) removeRunningTask(taskID UniqueID) {
|
||||
s.runningQueueLock.Lock()
|
||||
defer s.runningQueueLock.Unlock()
|
||||
|
||||
delete(s.runningTasks, taskID)
|
||||
}
|
||||
|
||||
func (s *taskScheduler) enqueue(task Task) {
|
||||
defer s.notify()
|
||||
taskID := task.GetTaskID()
|
||||
|
||||
s.runningQueueLock.RLock()
|
||||
_, ok := s.runningTasks[taskID]
|
||||
s.runningQueueLock.RUnlock()
|
||||
_, ok := s.runningTasks.Get(taskID)
|
||||
if !ok {
|
||||
s.pendingTasks.Push(task)
|
||||
task.SetQueueTime(time.Now())
|
||||
@@ -265,25 +251,21 @@ func (s *taskScheduler) enqueue(task Task) {

func (s *taskScheduler) AbortTask(taskID int64) {
log.Ctx(s.ctx).Info("task scheduler receive abort task request", zap.Int64("taskID", taskID))
s.taskLock.Lock(taskID)
defer s.taskLock.Unlock(taskID)

task := s.pendingTasks.Get(taskID)
if task != nil {
s.taskLock.Lock(taskID)
task.SetState(indexpb.JobState_JobStateFailed, "canceled")
s.taskLock.Unlock(taskID)
s.runningTasks.Insert(taskID, task)
s.pendingTasks.Remove(taskID)
return
}

s.runningQueueLock.Lock()
if task != nil {
s.runningTasks[taskID] = task
}
if runningTask, ok := s.runningTasks[taskID]; ok {
s.taskLock.Lock(taskID)
if runningTask, ok := s.runningTasks.Get(taskID); ok {
runningTask.SetState(indexpb.JobState_JobStateFailed, "canceled")
s.taskLock.Unlock(taskID)
s.runningTasks.Insert(taskID, runningTask)
}
s.runningQueueLock.Unlock()
s.pendingTasks.Remove(taskID)
}

func (s *taskScheduler) schedule() {
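AbortTask now holds a per-task key lock for the whole operation instead of the queue-wide mutex. A rough sketch of `lock.KeyLock` usage under that assumption follows; the `abort` closure and the printed message are illustrative.

```go
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/util/lock"
)

func main() {
	// One KeyLock serializes work per task ID instead of a single mutex for every task.
	taskLock := lock.NewKeyLock[int64]()

	abort := func(taskID int64) {
		taskLock.Lock(taskID) // only callers touching this taskID contend here
		defer taskLock.Unlock(taskID)
		fmt.Println("aborting task", taskID)
	}

	abort(5)
	abort(6) // different key, so it would not contend with task 5
}
```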
@@ -326,34 +308,29 @@ func (s *taskScheduler) checkProcessingTasksLoop() {
}

func (s *taskScheduler) checkProcessingTasks() {
runningTaskIDs := make([]UniqueID, 0)
s.runningQueueLock.RLock()
for taskID := range s.runningTasks {
runningTaskIDs = append(runningTaskIDs, taskID)
if s.runningTasks.Len() <= 0 {
return
}
s.runningQueueLock.RUnlock()

log.Ctx(s.ctx).Info("check running tasks", zap.Int("runningTask num", len(runningTaskIDs)))
log.Ctx(s.ctx).Info("check running tasks", zap.Int("runningTask num", s.runningTasks.Len()))

allRunningTasks := s.runningTasks.Values()
var wg sync.WaitGroup
sem := make(chan struct{}, 100)
for _, taskID := range runningTaskIDs {
for _, task := range allRunningTasks {
wg.Add(1)
sem <- struct{}{}
taskID := taskID
go func(taskID int64) {
go func(task Task) {
defer wg.Done()
defer func() {
<-sem
}()
task := s.getRunningTask(taskID)
s.taskLock.Lock(taskID)
s.taskLock.Lock(task.GetTaskID())
suc := s.checkProcessingTask(task)
s.taskLock.Unlock(taskID)
s.taskLock.Unlock(task.GetTaskID())
if suc {
s.removeRunningTask(taskID)
s.runningTasks.Remove(task.GetTaskID())
}
}(taskID)
}(task)
}
wg.Wait()
}
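checkProcessingTasks keeps its bounded fan-out: a WaitGroup plus a buffered channel used as a semaphore (capacity 100 in the scheduler). A standalone stdlib sketch of the same pattern, with a smaller capacity for readability:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	taskIDs := []int64{1, 2, 3, 4, 5}

	var wg sync.WaitGroup
	sem := make(chan struct{}, 2) // at most 2 checks in flight; the scheduler uses 100

	for _, id := range taskIDs {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot before spawning
		go func(taskID int64) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			fmt.Println("checking task", taskID)
		}(id)
	}
	wg.Wait()
}
```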
@@ -410,13 +387,13 @@ func (s *taskScheduler) run() {

switch task.GetState() {
case indexpb.JobState_JobStateNone:
return
if !s.processNone(task) {
s.pendingTasks.Push(task)
}
case indexpb.JobState_JobStateInit:
s.pendingTasks.Push(task)
default:
s.runningQueueLock.Lock()
s.runningTasks[task.GetTaskID()] = task
s.runningQueueLock.Unlock()
s.runningTasks.Insert(task.GetTaskID(), task)
}
}(task, nodeID)
}
@@ -433,7 +410,7 @@ func (s *taskScheduler) process(task Task, nodeID int64) bool {

switch task.GetState() {
case indexpb.JobState_JobStateNone:
return true
return s.processNone(task)
case indexpb.JobState_JobStateInit:
return s.processInit(task, nodeID)
default:
@@ -505,11 +482,10 @@ func (s *taskScheduler) collectTaskMetrics() {
collectPendingMetricsFunc(taskID)
}

s.runningQueueLock.RLock()
for _, task := range s.runningTasks {
allRunningTasks := s.runningTasks.Values()
for _, task := range allRunningTasks {
collectRunningMetricsFunc(task)
}
s.runningQueueLock.RUnlock()

for taskType, queueingTime := range maxTaskQueueingTime {
metrics.DataCoordTaskExecuteLatency.
@@ -577,6 +553,14 @@ func (s *taskScheduler) processInit(task Task, nodeID int64) bool {
return true
}

func (s *taskScheduler) processNone(task Task) bool {
if err := task.DropTaskMeta(s.ctx, s.meta); err != nil {
log.Ctx(s.ctx).Warn("set job info failed", zap.Error(err))
return false
}
return true
}

func (s *taskScheduler) processFinished(task Task) bool {
if err := task.SetJobInfo(s.meta); err != nil {
log.Ctx(s.ctx).Warn("update task info failed", zap.Error(err))

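The `run`/`process` switch now routes `JobStateNone` through `processNone`, and pushes the task back to the pending queue when dropping its meta fails. An illustrative sketch of that retry-on-failure shape; all names here are stand-ins, not the scheduler's types.

```go
package main

import (
	"errors"
	"fmt"
)

// job and dropMeta are stand-ins for the scheduler's Task and DropTaskMeta.
type job struct{ id int64 }

func dropMeta(j job) error { return errors.New("catalog unavailable") }

func main() {
	var pending []job

	// processNone mirrors the new helper: report success only if the meta is gone.
	processNone := func(j job) bool {
		if err := dropMeta(j); err != nil {
			fmt.Println("drop task meta failed, keep the task around:", err)
			return false
		}
		return true
	}

	j := job{id: 7}
	if !processNone(j) {
		pending = append(pending, j) // mirrors s.pendingTasks.Push(task) on failure
	}
	fmt.Println("pending tasks:", len(pending))
}
```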
@ -46,6 +46,7 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/lock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -249,9 +250,209 @@ func createIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx0 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx0.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx1 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx1.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx2 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx2.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 2,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 2,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: true,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx3 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx3.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 3,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 500,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx4 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx4.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 4,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx5 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx5.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 5,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx6 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx6.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 6,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 6,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx7 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx7.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 7,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 7,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "error",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx8 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx8.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 8,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 8,
|
||||
NodeID: nodeID + 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx9 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx9.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 9,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 500,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 9,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIdx10 := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx10.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID + 10,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 500,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 10,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx0)
|
||||
segIndexes.Insert(segID+1, segIdx1)
|
||||
segIndexes.Insert(segID+2, segIdx2)
|
||||
segIndexes.Insert(segID+3, segIdx3)
|
||||
segIndexes.Insert(segID+4, segIdx4)
|
||||
segIndexes.Insert(segID+5, segIdx5)
|
||||
segIndexes.Insert(segID+6, segIdx6)
|
||||
segIndexes.Insert(segID+7, segIdx7)
|
||||
segIndexes.Insert(segID+8, segIdx8)
|
||||
segIndexes.Insert(segID+9, segIdx9)
|
||||
segIndexes.Insert(segID+10, segIdx10)
|
||||
|
||||
return &indexMeta{
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -281,206 +482,7 @@ func createIndexMeta(catalog metastore.DataCoordCatalog) *indexMeta {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 1: {
|
||||
indexID: {
|
||||
SegmentID: segID + 1,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 1,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 2: {
|
||||
indexID: {
|
||||
SegmentID: segID + 2,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 2,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: true,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 3: {
|
||||
indexID: {
|
||||
SegmentID: segID + 3,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 500,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 3,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 4: {
|
||||
indexID: {
|
||||
SegmentID: segID + 4,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 4,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 5: {
|
||||
indexID: {
|
||||
SegmentID: segID + 5,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 5,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 6: {
|
||||
indexID: {
|
||||
SegmentID: segID + 6,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 6,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Finished,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 7: {
|
||||
indexID: {
|
||||
SegmentID: segID + 7,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 7,
|
||||
NodeID: 0,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_Failed,
|
||||
FailReason: "error",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 8: {
|
||||
indexID: {
|
||||
SegmentID: segID + 8,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 1026,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 8,
|
||||
NodeID: nodeID + 1,
|
||||
IndexVersion: 1,
|
||||
IndexState: commonpb.IndexState_InProgress,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 9: {
|
||||
indexID: {
|
||||
SegmentID: segID + 9,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 500,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 9,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
segID + 10: {
|
||||
indexID: {
|
||||
SegmentID: segID + 10,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: 500,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID + 10,
|
||||
NodeID: nodeID,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 1111,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
segmentBuildInfo: indexBuildInfo,
|
||||
}
|
||||
}
|
||||
@ -784,6 +786,9 @@ func (s *taskSchedulerSuite) scheduler(handler Handler) {
|
||||
})
|
||||
return nil
|
||||
})
|
||||
catalog.EXPECT().DropSegmentIndex(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(nil).Maybe()
|
||||
catalog.EXPECT().DropAnalyzeTask(mock.Anything, mock.Anything).Return(nil).Maybe()
|
||||
catalog.EXPECT().AlterSegmentIndexes(mock.Anything, mock.Anything).Return(nil)
|
||||
// catalog.EXPECT().SaveStatsTask(mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
@ -857,6 +862,9 @@ func (s *taskSchedulerSuite) scheduler(handler Handler) {
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
}))
|
||||
|
||||
cm := mocks.NewChunkManager(s.T())
|
||||
@ -864,14 +872,20 @@ func (s *taskSchedulerSuite) scheduler(handler Handler) {
|
||||
|
||||
scheduler := newTaskScheduler(ctx, mt, workerManager, cm, newIndexEngineVersionManager(), handler, nil, nil)
|
||||
s.Equal(6, scheduler.pendingTasks.TaskCount())
|
||||
s.Equal(3, len(scheduler.runningTasks))
|
||||
s.Equal(3, scheduler.runningTasks.Len())
|
||||
s.Equal(indexpb.JobState_JobStateInit, scheduler.pendingTasks.Get(1).GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInit, scheduler.pendingTasks.Get(2).GetState())
|
||||
s.Equal(indexpb.JobState_JobStateRetry, scheduler.runningTasks[5].GetState())
|
||||
t5, ok := scheduler.runningTasks.Get(5)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateRetry, t5.GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInit, scheduler.pendingTasks.Get(buildID).GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInProgress, scheduler.runningTasks[buildID+1].GetState())
|
||||
t6, ok := scheduler.runningTasks.Get(buildID + 1)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateInProgress, t6.GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInit, scheduler.pendingTasks.Get(buildID+3).GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInProgress, scheduler.runningTasks[buildID+8].GetState())
|
||||
t8, ok := scheduler.runningTasks.Get(buildID + 8)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateInProgress, t8.GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInit, scheduler.pendingTasks.Get(buildID+9).GetState())
|
||||
s.Equal(indexpb.JobState_JobStateInit, scheduler.pendingTasks.Get(buildID+10).GetState())
|
||||
|
||||
@ -907,9 +921,7 @@ func (s *taskSchedulerSuite) scheduler(handler Handler) {
|
||||
if scheduler.pendingTasks.TaskCount() == 0 {
|
||||
// maybe task is waiting for assigning, so sleep three seconds.
|
||||
time.Sleep(time.Second * 3)
|
||||
scheduler.runningQueueLock.RLock()
|
||||
taskNum := len(scheduler.runningTasks)
|
||||
scheduler.runningQueueLock.RUnlock()
|
||||
taskNum := scheduler.runningTasks.Len()
|
||||
if taskNum == 0 {
|
||||
break
|
||||
}
|
||||
@ -952,10 +964,9 @@ func (s *taskSchedulerSuite) scheduler(handler Handler) {
|
||||
indexJob, exist = mt.indexMeta.GetIndexJob(buildID + 8)
|
||||
s.True(exist)
|
||||
s.Equal(commonpb.IndexState_Finished, indexJob.IndexState)
|
||||
indexJob, exist = mt.indexMeta.GetIndexJob(buildID + 9)
|
||||
s.True(exist)
|
||||
// segment not healthy, wait for GC
|
||||
s.Equal(commonpb.IndexState_Unissued, indexJob.IndexState)
|
||||
_, exist = mt.indexMeta.GetIndexJob(buildID + 9)
|
||||
s.False(exist)
|
||||
// segment not healthy, remove task
|
||||
indexJob, exist = mt.indexMeta.GetIndexJob(buildID + 10)
|
||||
s.True(exist)
|
||||
s.Equal(commonpb.IndexState_Finished, indexJob.IndexState)
|
||||
@ -1009,12 +1020,17 @@ func (s *taskSchedulerSuite) Test_analyzeTaskFailCase() {
|
||||
}),
|
||||
withIndexMeta(&indexMeta{
|
||||
ctx: ctx,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
catalog: catalog,
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
}),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
tasks: nil,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
}))
|
||||
|
||||
handler := NewNMockHandler(s.T())
|
||||
@ -1030,9 +1046,7 @@ func (s *taskSchedulerSuite) Test_analyzeTaskFailCase() {
|
||||
|
||||
for {
|
||||
if scheduler.pendingTasks.TaskCount() == 0 {
|
||||
scheduler.runningQueueLock.RLock()
|
||||
taskNum := len(scheduler.runningTasks)
|
||||
scheduler.runningQueueLock.RUnlock()
|
||||
taskNum := scheduler.runningTasks.Len()
|
||||
if taskNum == 0 {
|
||||
break
|
||||
}
|
||||
@ -1063,9 +1077,15 @@ func (s *taskSchedulerSuite) Test_analyzeTaskFailCase() {
|
||||
withIndexMeta(&indexMeta{
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
segmentIndexes: typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]](),
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
}), withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
}))
|
||||
|
||||
handler := NewNMockHandler(s.T())
|
||||
@ -1279,10 +1299,7 @@ func (s *taskSchedulerSuite) Test_analyzeTaskFailCase() {
|
||||
|
||||
for {
|
||||
if scheduler.pendingTasks.TaskCount() == 0 {
|
||||
scheduler.runningQueueLock.RLock()
|
||||
taskNum := len(scheduler.runningTasks)
|
||||
scheduler.runningQueueLock.RUnlock()
|
||||
|
||||
taskNum := scheduler.runningTasks.Len()
|
||||
if taskNum == 0 {
|
||||
break
|
||||
}
|
||||
@ -1308,15 +1325,29 @@ func (s *taskSchedulerSuite) Test_indexTaskFailCase() {
|
||||
}
|
||||
})
|
||||
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: s.collectionID,
|
||||
PartitionID: s.partitionID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx)
|
||||
|
||||
mt := createMeta(catalog,
|
||||
withAnalyzeMeta(&analyzeMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
}),
|
||||
withIndexMeta(&indexMeta{
|
||||
RWMutex: sync.RWMutex{},
|
||||
fieldIndexLock: sync.RWMutex{},
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
s.collectionID: {
|
||||
indexID: {
|
||||
@ -1344,23 +1375,14 @@ func (s *taskSchedulerSuite) Test_indexTaskFailCase() {
|
||||
},
|
||||
},
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
buildID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: s.collectionID,
|
||||
PartitionID: s.partitionID,
|
||||
NumRows: 1025,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
}),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
}))
|
||||
|
||||
mt.indexMeta.segmentBuildInfo.Add(&model.SegmentIndex{
|
||||
@ -1504,9 +1526,7 @@ func (s *taskSchedulerSuite) Test_indexTaskFailCase() {
|
||||
<-finishCH
|
||||
for {
|
||||
if scheduler.pendingTasks.TaskCount() == 0 {
|
||||
scheduler.runningQueueLock.RLock()
|
||||
taskNum := len(scheduler.runningTasks)
|
||||
scheduler.runningQueueLock.RUnlock()
|
||||
taskNum := scheduler.runningTasks.Len()
|
||||
if taskNum == 0 {
|
||||
break
|
||||
}
|
||||
@ -1574,6 +1594,25 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
|
||||
IsPartitionKey: true,
|
||||
},
|
||||
}
|
||||
segIndexes := typeutil.NewConcurrentMap[UniqueID, *typeutil.ConcurrentMap[UniqueID, *model.SegmentIndex]]()
|
||||
segIdx := typeutil.NewConcurrentMap[UniqueID, *model.SegmentIndex]()
|
||||
segIdx.Insert(indexID, &model.SegmentIndex{
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: minNumberOfRowsToBuild,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
})
|
||||
segIndexes.Insert(segID, segIdx)
|
||||
mt := meta{
|
||||
catalog: catalog,
|
||||
collections: map[int64]*collectionInfo{
|
||||
@ -1593,6 +1632,7 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
|
||||
|
||||
indexMeta: &indexMeta{
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
indexID: {
|
||||
@ -1622,26 +1662,7 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: map[UniqueID]map[UniqueID]*model.SegmentIndex{
|
||||
segID: {
|
||||
indexID: {
|
||||
SegmentID: segID,
|
||||
CollectionID: collID,
|
||||
PartitionID: partID,
|
||||
NumRows: minNumberOfRowsToBuild,
|
||||
IndexID: indexID,
|
||||
BuildID: buildID,
|
||||
NodeID: 0,
|
||||
IndexVersion: 0,
|
||||
IndexState: commonpb.IndexState_Unissued,
|
||||
FailReason: "",
|
||||
IsDeleted: false,
|
||||
CreatedUTCTime: 0,
|
||||
IndexFileKeys: nil,
|
||||
IndexSerializedSize: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
segmentIndexes: segIndexes,
|
||||
segmentBuildInfo: newSegmentIndexBuildInfo(),
|
||||
},
|
||||
segments: &SegmentsInfo{
|
||||
@ -1663,6 +1684,9 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
|
||||
statsTaskMeta: &statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
},
|
||||
}
|
||||
|
||||
@ -1702,25 +1726,33 @@ func (s *taskSchedulerSuite) Test_indexTaskWithMvOptionalScalarField() {
|
||||
|
||||
waitTaskDoneFunc := func(sche *taskScheduler) {
|
||||
for {
|
||||
time.Sleep(time.Second * 3)
|
||||
if sche.pendingTasks.TaskCount() == 0 {
|
||||
sche.runningQueueLock.RLock()
|
||||
taskNum := len(sche.runningTasks)
|
||||
sche.runningQueueLock.RUnlock()
|
||||
taskNum := scheduler.runningTasks.Len()
|
||||
if taskNum == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
resetMetaFunc := func() {
|
||||
mt.indexMeta.Lock()
|
||||
defer mt.indexMeta.Unlock()
|
||||
mt.indexMeta.keyLock.Lock(buildID)
|
||||
t, ok := mt.indexMeta.segmentBuildInfo.Get(buildID)
|
||||
s.True(ok)
|
||||
t.IndexState = commonpb.IndexState_Unissued
|
||||
mt.indexMeta.segmentIndexes[segID][indexID].IndexState = commonpb.IndexState_Unissued
|
||||
mt.indexMeta.segmentBuildInfo.Add(t)
|
||||
segIdxes, ok := mt.indexMeta.segmentIndexes.Get(segID)
|
||||
s.True(ok)
|
||||
t, ok = segIdxes.Get(indexID)
|
||||
s.True(ok)
|
||||
t.IndexState = commonpb.IndexState_Unissued
|
||||
segIdxes.Insert(indexID, t)
|
||||
mt.indexMeta.segmentIndexes.Insert(segID, segIdxes)
|
||||
mt.indexMeta.keyLock.Unlock(buildID)
|
||||
|
||||
mt.indexMeta.fieldIndexLock.Lock()
|
||||
defer mt.indexMeta.fieldIndexLock.Unlock()
|
||||
mt.indexMeta.indexes[collID][indexID].IndexParams[1].Value = "HNSW"
|
||||
mt.collections[collID].Schema.Fields[0].DataType = schemapb.DataType_FloatVector
|
||||
mt.collections[collID].Schema.Fields[1].IsPartitionKey = true
|
||||
@ -2018,12 +2050,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
catalog := catalogmocks.NewDataCoordCatalog(s.T())
|
||||
workerManager := session.NewMockWorkerManager(s.T())
|
||||
handler := NewNMockHandler(s.T())
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
statsTaskID: {
|
||||
tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
|
||||
statsTask := &indexpb.StatsTask{
|
||||
CollectionID: 10000,
|
||||
PartitionID: 10001,
|
||||
SegmentID: 1000,
|
||||
@ -2036,15 +2064,25 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
TargetSegmentID: 2000,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
CanRecycle: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
tasks.Insert(statsTaskID, statsTask)
|
||||
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
|
||||
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
|
||||
secondaryIndex.Insert(secondaryKey, statsTask)
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: tasks,
|
||||
segmentID2Tasks: secondaryIndex,
|
||||
}))
|
||||
compactionHandler := NewMockCompactionPlanContext(s.T())
|
||||
compactionHandler.EXPECT().checkAndSetSegmentStating(mock.Anything, mock.Anything).Return(true).Maybe()
|
||||
scheduler := newTaskScheduler(context.Background(), mt, workerManager, nil, nil, handler, nil, compactionHandler)
|
||||
s.NotNil(scheduler)
|
||||
s.True(mt.segments.segments[1000].isCompacting)
|
||||
task := scheduler.runningTasks[statsTaskID]
|
||||
task, ok := scheduler.runningTasks.Get(statsTaskID)
|
||||
s.True(ok)
|
||||
s.NotNil(task)
|
||||
})
|
||||
|
||||
@ -2053,12 +2091,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(nil)
|
||||
workerManager := session.NewMockWorkerManager(s.T())
|
||||
handler := NewNMockHandler(s.T())
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
statsTaskID: {
|
||||
tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
|
||||
statsTask := &indexpb.StatsTask{
|
||||
CollectionID: 10000,
|
||||
PartitionID: 10001,
|
||||
SegmentID: 1000,
|
||||
@ -2071,8 +2105,18 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
TargetSegmentID: 2000,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
CanRecycle: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
tasks.Insert(statsTaskID, statsTask)
|
||||
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
|
||||
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
|
||||
secondaryIndex.Insert(secondaryKey, statsTask)
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: tasks,
|
||||
segmentID2Tasks: secondaryIndex,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
}))
|
||||
compactionHandler := NewMockCompactionPlanContext(s.T())
|
||||
compactionHandler.EXPECT().checkAndSetSegmentStating(mock.Anything, mock.Anything).Return(true).Maybe()
|
||||
@ -2089,12 +2133,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error"))
|
||||
workerManager := session.NewMockWorkerManager(s.T())
|
||||
handler := NewNMockHandler(s.T())
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
statsTaskID: {
|
||||
tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
|
||||
statsTask := &indexpb.StatsTask{
|
||||
CollectionID: 10000,
|
||||
PartitionID: 10001,
|
||||
SegmentID: 1000,
|
||||
@ -2107,8 +2147,18 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
TargetSegmentID: 2000,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
CanRecycle: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
tasks.Insert(statsTaskID, statsTask)
|
||||
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
|
||||
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
|
||||
secondaryIndex.Insert(secondaryKey, statsTask)
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: tasks,
|
||||
segmentID2Tasks: secondaryIndex,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
}))
|
||||
compactionHandler := NewMockCompactionPlanContext(s.T())
|
||||
compactionHandler.EXPECT().checkAndSetSegmentStating(mock.Anything, mock.Anything).Return(true).Maybe()
|
||||
@ -2116,7 +2166,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
scheduler := newTaskScheduler(context.Background(), mt, workerManager, nil, nil, handler, nil, compactionHandler)
|
||||
s.NotNil(scheduler)
|
||||
s.True(mt.segments.segments[1000].isCompacting)
|
||||
task := scheduler.runningTasks[statsTaskID]
|
||||
task, ok := scheduler.runningTasks.Get(statsTaskID)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateFailed, task.GetState())
|
||||
})
|
||||
|
||||
@ -2125,12 +2176,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(nil)
|
||||
workerManager := session.NewMockWorkerManager(s.T())
|
||||
handler := NewNMockHandler(s.T())
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
statsTaskID: {
|
||||
tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
|
||||
statsTask := &indexpb.StatsTask{
|
||||
CollectionID: 10000,
|
||||
PartitionID: 10001,
|
||||
SegmentID: 1000,
|
||||
@ -2143,8 +2190,18 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
TargetSegmentID: 2000,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
CanRecycle: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
tasks.Insert(statsTaskID, statsTask)
|
||||
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
|
||||
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
|
||||
secondaryIndex.Insert(secondaryKey, statsTask)
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: tasks,
|
||||
segmentID2Tasks: secondaryIndex,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
}))
|
||||
compactionHandler := NewMockCompactionPlanContext(s.T())
|
||||
compactionHandler.EXPECT().checkAndSetSegmentStating(mock.Anything, mock.Anything).Return(false).Maybe()
|
||||
@ -2161,12 +2218,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
catalog.EXPECT().DropStatsTask(mock.Anything, mock.Anything).Return(errors.New("mock error"))
|
||||
workerManager := session.NewMockWorkerManager(s.T())
|
||||
handler := NewNMockHandler(s.T())
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
statsTaskID: {
|
||||
tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
|
||||
statsTask := &indexpb.StatsTask{
|
||||
CollectionID: 10000,
|
||||
PartitionID: 10001,
|
||||
SegmentID: 1000,
|
||||
@ -2179,8 +2232,19 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
TargetSegmentID: 2000,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
CanRecycle: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
tasks.Insert(statsTaskID, statsTask)
|
||||
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
|
||||
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
|
||||
secondaryIndex.Insert(secondaryKey, statsTask)
|
||||
|
||||
mt := createMeta(catalog, withAnalyzeMeta(s.createAnalyzeMeta(catalog)), withIndexMeta(createIndexMeta(catalog)),
|
||||
withStatsTaskMeta(&statsTaskMeta{
|
||||
ctx: context.Background(),
|
||||
catalog: catalog,
|
||||
tasks: tasks,
|
||||
segmentID2Tasks: secondaryIndex,
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
}))
|
||||
compactionHandler := NewMockCompactionPlanContext(s.T())
|
||||
compactionHandler.EXPECT().checkAndSetSegmentStating(mock.Anything, mock.Anything).Return(false).Maybe()
|
||||
@ -2188,7 +2252,8 @@ func (s *taskSchedulerSuite) Test_reload() {
|
||||
scheduler := newTaskScheduler(context.Background(), mt, workerManager, nil, nil, handler, nil, compactionHandler)
|
||||
s.NotNil(scheduler)
|
||||
s.False(mt.segments.segments[1000].isCompacting)
|
||||
task := scheduler.runningTasks[statsTaskID]
|
||||
task, ok := scheduler.runningTasks.Get(statsTaskID)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateFailed, task.GetState())
|
||||
})
|
||||
}
|
||||
@ -2214,8 +2279,12 @@ func (s *taskSchedulerSuite) Test_zeroSegmentStats() {
|
||||
statsTaskMeta: &statsTaskMeta{
|
||||
ctx: ctx,
|
||||
catalog: catalog,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
taskID: {
|
||||
tasks: typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask](),
|
||||
segmentID2Tasks: typeutil.NewConcurrentMap[string, *indexpb.StatsTask](),
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
},
|
||||
}
|
||||
mt.statsTaskMeta.tasks.Insert(taskID, &indexpb.StatsTask{
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
SegmentID: segID,
|
||||
@ -2228,10 +2297,7 @@ func (s *taskSchedulerSuite) Test_zeroSegmentStats() {
|
||||
TargetSegmentID: targetSegID,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
CanRecycle: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
catalog.EXPECT().AddSegment(mock.Anything, mock.Anything).Return(nil)
|
||||
err := mt.AddSegment(ctx, &SegmentInfo{
|
||||
@ -2259,7 +2325,7 @@ func (s *taskSchedulerSuite) Test_zeroSegmentStats() {
|
||||
cancel: cancel,
|
||||
meta: mt,
|
||||
pendingTasks: newFairQueuePolicy(),
|
||||
runningTasks: make(map[UniqueID]Task),
|
||||
runningTasks: typeutil.NewConcurrentMap[UniqueID, Task](),
|
||||
notifyChan: make(chan struct{}, 1),
|
||||
taskLock: lock.NewKeyLock[int64](),
|
||||
scheduleDuration: Params.DataCoordCfg.IndexTaskSchedulerInterval.GetAsDuration(time.Millisecond),
|
||||
@ -2279,9 +2345,7 @@ func (s *taskSchedulerSuite) Test_zeroSegmentStats() {
|
||||
for {
|
||||
time.Sleep(time.Second)
|
||||
if scheduler.pendingTasks.TaskCount() == 0 {
|
||||
scheduler.runningQueueLock.RLock()
|
||||
taskNum := len(scheduler.runningTasks)
|
||||
scheduler.runningQueueLock.RUnlock()
|
||||
taskNum := scheduler.runningTasks.Len()
|
||||
if taskNum == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -160,9 +160,10 @@ func (st *statsTask) UpdateMetaBuildingState(meta *meta) error {
}

func (st *statsTask) PreCheck(ctx context.Context, dependency *taskScheduler) bool {
log := log.Ctx(ctx).With(zap.Int64("taskID", st.taskID), zap.Int64("segmentID", st.segmentID))
log := log.Ctx(ctx).With(zap.Int64("taskID", st.taskID), zap.Int64("segmentID", st.segmentID),
zap.Int64("targetSegmentID", st.targetSegmentID))

statsMeta := dependency.meta.statsTaskMeta.GetStatsTaskBySegmentID(st.segmentID, st.subJobType)
statsMeta := dependency.meta.statsTaskMeta.GetStatsTask(st.taskID)
if statsMeta == nil {
log.Warn("stats task meta is null, skip it")
st.SetState(indexpb.JobState_JobStateNone, "stats task meta is null")
@@ -241,6 +242,9 @@ func (st *statsTask) PreCheck(ctx context.Context, dependency *taskScheduler) bo
BinlogMaxSize: Params.DataNodeCfg.BinLogMaxSize.GetAsUint64(),
}

log.Info("stats task pre check successfully", zap.String("subJobType", st.subJobType.String()),
zap.Int64("num rows", segment.GetNumOfRows()), zap.Int64("task version", st.req.GetTaskVersion()))

return true
}

@@ -297,13 +301,16 @@ func (st *statsTask) QueryResult(ctx context.Context, client types.DataNodeClien

for _, result := range resp.GetStatsJobResults().GetResults() {
if result.GetTaskID() == st.GetTaskID() {
if result.GetState() == indexpb.JobState_JobStateFinished || result.GetState() == indexpb.JobState_JobStateRetry ||
result.GetState() == indexpb.JobState_JobStateFailed {
log.Ctx(ctx).Info("query stats task result success", zap.Int64("taskID", st.GetTaskID()),
zap.Int64("segmentID", st.segmentID), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
if result.GetState() == indexpb.JobState_JobStateFinished || result.GetState() == indexpb.JobState_JobStateRetry ||
result.GetState() == indexpb.JobState_JobStateFailed {
st.setResult(result)
} else if result.GetState() == indexpb.JobState_JobStateNone {
log.Ctx(ctx).Info("query stats task result success", zap.Int64("taskID", st.GetTaskID()),
zap.Int64("segmentID", st.segmentID), zap.String("result state", result.GetState().String()),
zap.String("failReason", result.GetFailReason()))
st.SetState(indexpb.JobState_JobStateRetry, "stats task state is none in info response")
}
// inProgress or unissued/init, keep InProgress state
@@ -374,3 +381,12 @@ func (st *statsTask) SetJobInfo(meta *meta) error {
zap.String("subJobType", st.subJobType.String()), zap.String("state", st.taskInfo.GetState().String()))
return nil
}

func (st *statsTask) DropTaskMeta(ctx context.Context, meta *meta) error {
if err := meta.statsTaskMeta.DropStatsTask(st.taskID); err != nil {
log.Ctx(ctx).Warn("drop stats task failed", zap.Int64("taskID", st.taskID), zap.Error(err))
return err
}
log.Ctx(ctx).Info("drop stats task success", zap.Int64("taskID", st.taskID))
return nil
}

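PreCheck now resolves the stats task by task ID, while `statsTaskMeta` also keeps a secondary map keyed by segment ID plus sub-job type (`segmentID2Tasks`). A sketch of maintaining such a primary map plus secondary index with `typeutil.ConcurrentMap`; `secondaryKey` and `demoStatsTask` below are stand-ins for the real `createSecondaryIndexKey` and `*indexpb.StatsTask`, whose actual shapes live in datacoord.

```go
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

// demoStatsTask stands in for *indexpb.StatsTask.
type demoStatsTask struct {
	TaskID    int64
	SegmentID int64
	SubJob    string
}

// secondaryKey is a stand-in for createSecondaryIndexKey; the real key format lives in datacoord.
func secondaryKey(segmentID int64, subJob string) string {
	return fmt.Sprintf("%d-%s", segmentID, subJob)
}

func main() {
	byTaskID := typeutil.NewConcurrentMap[int64, *demoStatsTask]()   // primary: taskID -> task
	bySegment := typeutil.NewConcurrentMap[string, *demoStatsTask]() // secondary: segmentID+subJob -> task

	t := &demoStatsTask{TaskID: 11, SegmentID: 1000, SubJob: "Sort"}
	byTaskID.Insert(t.TaskID, t)
	bySegment.Insert(secondaryKey(t.SegmentID, t.SubJob), t)

	// PreCheck-style lookup now goes straight by task ID.
	if got, ok := byTaskID.Get(11); ok {
		fmt.Println("stats task found for segment", got.SegmentID)
	}
	// Segment-oriented lookups keep working through the secondary map.
	if _, ok := bySegment.Get(secondaryKey(1000, "Sort")); ok {
		fmt.Println("secondary index hit")
	}
}
```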
@ -19,7 +19,6 @@ package datacoord
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -35,6 +34,8 @@ import (
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/workerpb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/lock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
|
||||
)
|
||||
|
||||
type statsTaskSuite struct {
|
||||
@ -55,6 +56,24 @@ func (s *statsTaskSuite) SetupSuite() {
|
||||
s.segID = 1179
|
||||
s.targetID = 1180
|
||||
|
||||
tasks := typeutil.NewConcurrentMap[UniqueID, *indexpb.StatsTask]()
|
||||
statsTask := &indexpb.StatsTask{
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
SegmentID: s.segID,
|
||||
InsertChannel: "ch1",
|
||||
TaskID: s.taskID,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
Version: 0,
|
||||
NodeID: 0,
|
||||
State: indexpb.JobState_JobStateInit,
|
||||
FailReason: "",
|
||||
}
|
||||
tasks.Insert(s.taskID, statsTask)
|
||||
secondaryIndex := typeutil.NewConcurrentMap[string, *indexpb.StatsTask]()
|
||||
secondaryKey := createSecondaryIndexKey(statsTask.GetSegmentID(), statsTask.GetSubJobType().String())
|
||||
secondaryIndex.Insert(secondaryKey, statsTask)
|
||||
|
||||
s.mt = &meta{
|
||||
segments: &SegmentsInfo{
|
||||
segments: map[int64]*SegmentInfo{
|
||||
@ -109,23 +128,11 @@ func (s *statsTaskSuite) SetupSuite() {
|
||||
},
|
||||
|
||||
statsTaskMeta: &statsTaskMeta{
|
||||
RWMutex: sync.RWMutex{},
|
||||
keyLock: lock.NewKeyLock[UniqueID](),
|
||||
ctx: context.Background(),
|
||||
catalog: nil,
|
||||
tasks: map[int64]*indexpb.StatsTask{
|
||||
s.taskID: {
|
||||
CollectionID: 1,
|
||||
PartitionID: 2,
|
||||
SegmentID: s.segID,
|
||||
InsertChannel: "ch1",
|
||||
TaskID: s.taskID,
|
||||
SubJobType: indexpb.StatsSubJob_Sort,
|
||||
Version: 0,
|
||||
NodeID: 0,
|
||||
State: indexpb.JobState_JobStateInit,
|
||||
FailReason: "",
|
||||
},
|
||||
},
|
||||
tasks: tasks,
|
||||
segmentID2Tasks: secondaryIndex,
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -595,7 +602,9 @@ func (s *statsTaskSuite) TestTaskStats_PreCheck() {
|
||||
|
||||
s.NoError(st.SetJobInfo(s.mt))
|
||||
s.NotNil(s.mt.GetHealthySegment(context.TODO(), s.targetID))
|
||||
s.Equal(indexpb.JobState_JobStateFinished, s.mt.statsTaskMeta.tasks[s.taskID].GetState())
|
||||
t, ok := s.mt.statsTaskMeta.tasks.Get(s.taskID)
|
||||
s.True(ok)
|
||||
s.Equal(indexpb.JobState_JobStateFinished, t.GetState())
|
||||
s.Equal(datapb.SegmentLevel_L2, s.mt.GetHealthySegment(context.TODO(), s.targetID).GetLevel())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -46,4 +46,5 @@ type Task interface {
SetEndTime(time.Time)
GetEndTime() time.Time
GetTaskType() string
DropTaskMeta(ctx context.Context, meta *meta) error
}

@@ -33,7 +33,7 @@ import (

// TaskQueue is a queue used to store tasks.
type TaskQueue interface {
utChan() <-chan int
utChan() <-chan struct{}
utEmpty() bool
utFull() bool
addUnissuedTask(t Task) error
@@ -54,12 +54,12 @@ type IndexTaskQueue struct {
// maxTaskNum should keep still
maxTaskNum int64

utBufChan chan int // to block scheduler
utBufChan chan struct{} // to block scheduler

sched *TaskScheduler
}

func (queue *IndexTaskQueue) utChan() <-chan int {
func (queue *IndexTaskQueue) utChan() <-chan struct{} {
return queue.utBufChan
}

@@ -79,7 +79,10 @@ func (queue *IndexTaskQueue) addUnissuedTask(t Task) error {
return errors.New("index task queue is full")
}
queue.unissuedTasks.PushBack(t)
queue.utBufChan <- 1
select {
case queue.utBufChan <- struct{}{}:
default:
}
return nil
}

@@ -159,7 +162,7 @@ func NewIndexBuildTaskQueue(sched *TaskScheduler) *IndexTaskQueue {
activeTasks: make(map[string]Task),
maxTaskNum: 1024,

utBufChan: make(chan int, 1024),
utBufChan: make(chan struct{}, 1024),
sched: sched,
}
}

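The queue change above switches `utBufChan` to `chan struct{}` and makes the wake-up send non-blocking via `select`/`default`. A self-contained sketch of that signaling pattern:

```go
package main

import "fmt"

func main() {
	// struct{} carries no payload, so the channel is purely a wake-up signal.
	notify := make(chan struct{}, 4)

	enqueue := func(i int) {
		fmt.Println("queued item", i)
		select {
		case notify <- struct{}{}: // wake the consumer if there is room
		default: // never block the producer when the buffer is full
		}
	}

	for i := 0; i < 8; i++ {
		enqueue(i)
	}
	fmt.Println("pending wake-ups:", len(notify))
}
```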
@ -309,6 +309,7 @@ var (
|
||||
}, []string{statusLabelName})
|
||||
|
||||
// IndexTaskNum records the number of index tasks of each type.
|
||||
// Deprecated: please ues TaskNum after v2.5.5.
|
||||
IndexTaskNum = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: milvusNamespace,
|
||||
@ -350,7 +351,7 @@ var (
|
||||
Help: "latency of task execute operation",
|
||||
Buckets: longTaskBuckets,
|
||||
}, []string{
|
||||
taskTypeLabel,
|
||||
TaskTypeLabel,
|
||||
statusLabelName,
|
||||
})
|
||||
|
||||
@ -361,7 +362,7 @@ var (
|
||||
Subsystem: typeutil.DataCoordRole,
|
||||
Name: "task_count",
|
||||
Help: "number of index tasks of each type",
|
||||
}, []string{collectionIDLabelName, taskTypeLabel, taskStateLabel})
|
||||
}, []string{TaskTypeLabel, TaskStateLabel})
|
||||
)
|
||||
|
||||
// RegisterDataCoord registers DataCoord metrics
|
||||
|
||||
@ -136,8 +136,8 @@ const (
|
||||
LoadedLabel = "loaded"
|
||||
NumEntitiesAllLabel = "all"
|
||||
|
||||
taskTypeLabel = "task_type"
|
||||
taskStateLabel = "task_state"
|
||||
TaskTypeLabel = "task_type"
|
||||
TaskStateLabel = "task_state"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@ -435,7 +435,7 @@ var (
|
||||
Subsystem: typeutil.ProxyRole,
|
||||
Name: "queue_task_num",
|
||||
Help: "",
|
||||
}, []string{nodeIDLabelName, queueTypeLabelName, taskStateLabel})
|
||||
}, []string{nodeIDLabelName, queueTypeLabelName, TaskStateLabel})
|
||||
|
||||
ProxyParseExpressionLatency = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
|
||||
@ -132,7 +132,7 @@ var (
|
||||
Name: "task_latency",
|
||||
Help: "latency of all kind of task in query coord scheduler scheduler",
|
||||
Buckets: longTaskBuckets,
|
||||
}, []string{collectionIDLabelName, taskTypeLabel, channelNameLabelName})
|
||||
}, []string{collectionIDLabelName, TaskTypeLabel, channelNameLabelName})
|
||||
|
||||
QueryCoordResourceGroupInfo = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
|
||||