fix: [2.5] Fix target segment being marked dropped twice when saving stats result (#45480)

issue: #45477 

master pr: #45478

---------

Signed-off-by: Cai Zhang <cai.zhang@zilliz.com>
This commit is contained in:
cai.zhang 2025-11-13 09:41:37 +08:00 committed by GitHub
parent 1d6786545b
commit 6eb77ddc4d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 22 additions and 21 deletions

View File

@ -89,7 +89,7 @@ pipeline {
axes {
axis {
name 'milvus_deployment_option'
values 'standalone', 'distributed', 'standalone-kafka-mmap'
values 'standalone', 'distributed'
}
}
stages {

View File

@ -360,8 +360,11 @@ func (t *compactionTrigger) handleSignal(signal *compactionSignal) error {
coll, err := t.getCollection(group.collectionID)
if err != nil {
log.Warn("get collection info failed, skip handling compaction", zap.Error(err))
if signal.collectionID != 0 {
return err
}
continue
}
if !signal.isForce && !isCollectionAutoCompactionEnabled(coll) {
log.RatedInfo(20, "collection auto compaction disabled")

View File

@ -2117,8 +2117,8 @@ func (m *meta) SaveStatsResultSegment(oldSegmentID int64, result *workerpb.Stats
metricMutation := &segMetricMutation{stateChange: make(map[string]map[string]map[string]int)}
oldSegment := m.segments.GetSegment(oldSegmentID)
if oldSegment == nil {
log.Warn("old segment is not found with stats task")
if oldSegment == nil || !isSegmentHealthy(oldSegment) {
log.Warn("old segment is not found or not healthy with stats task")
return nil, merr.WrapErrSegmentNotFound(oldSegmentID)
}
@ -2144,7 +2144,8 @@ func (m *meta) SaveStatsResultSegment(oldSegmentID int64, result *workerpb.Stats
DmlPosition: oldSegment.GetDmlPosition(),
IsImporting: oldSegment.GetIsImporting(),
StorageVersion: oldSegment.GetStorageVersion(),
State: oldSegment.GetState(),
// only flushed segment can do sort stats
State: commonpb.SegmentState_Flushed,
Level: oldSegment.GetLevel(),
LastLevel: oldSegment.GetLastLevel(),
PartitionStatsVersion: oldSegment.GetPartitionStatsVersion(),

View File

@ -581,7 +581,9 @@ func (s *taskScheduler) processFinished(task Task) bool {
client, exist := s.nodeManager.GetClientByID(task.GetNodeID())
if exist {
if !task.DropTaskOnWorker(s.ctx, client) {
return false
log.Ctx(s.ctx).Warn("drop task on worker failed, but ignore it",
zap.Int64("taskID", task.GetTaskID()), zap.Int64("nodeID", task.GetNodeID()))
return true
}
}
log.Ctx(s.ctx).Info("task has been finished", zap.Int64("taskID", task.GetTaskID()),

View File

@ -1301,16 +1301,11 @@ func (s *taskSchedulerSuite) Test_analyzeTaskFailCase() {
// set job info failed --> state: Finished
catalog.EXPECT().SaveAnalyzeTask(mock.Anything, mock.Anything).Return(errors.New("set job info failed")).Once()
// set job success, drop job on task failed --> state: Finished
// set job success, drop job on task failed --> state: Finished (skip drop error, task success)
catalog.EXPECT().SaveAnalyzeTask(mock.Anything, mock.Anything).Return(nil).Once()
workerManager.EXPECT().GetClientByID(mock.Anything).Return(in, true).Once()
in.EXPECT().DropJobsV2(mock.Anything, mock.Anything).Return(merr.Status(errors.New("drop job failed")), nil).Once()
// drop job success --> no task
catalog.EXPECT().SaveAnalyzeTask(mock.Anything, mock.Anything).Return(nil).Once()
workerManager.EXPECT().GetClientByID(mock.Anything).Return(in, true).Once()
in.EXPECT().DropJobsV2(mock.Anything, mock.Anything).Return(merr.Success(), nil).Once()
for {
if scheduler.pendingTasks.TaskCount() == 0 {
taskNum := scheduler.runningTasks.Len()