Skip canceled tasks (#19513)
Signed-off-by: yah01 <yang.cen@zilliz.com>
parent 7ca844ab99
commit 9dcee37e1c
@@ -169,7 +169,7 @@ queryCoord:
   distPullInterval: 500
   loadTimeoutSeconds: 600
   checkHandoffInterval: 5000
-  taskMergeCap: 2
+  taskMergeCap: 8
@@ -14,7 +14,7 @@ import (
 )

 var (
-    checkRoundTaskNumLimit = 128
+    checkRoundTaskNumLimit = 256
 )

 type CheckerController struct {
@@ -86,20 +86,21 @@ func (controller *CheckerController) Stop() {
 // check is the real implementation of Check
 func (controller *CheckerController) check(ctx context.Context) {
     tasks := make([]task.Task, 0)
-    for id, checker := range controller.checkers {
-        log := log.With(zap.Int("checkerID", id))
-
+    for _, checker := range controller.checkers {
         tasks = append(tasks, checker.Check(ctx)...)
-        if len(tasks) >= checkRoundTaskNumLimit {
-            log.Info("checkers have spawn too many tasks, won't run subsequent checkers, and truncate the spawned tasks",
+    }
+
+    added := 0
+    for _, task := range tasks {
+        err := controller.scheduler.Add(task)
+        if err != nil {
+            continue
+        }
+        added++
+        if added >= checkRoundTaskNumLimit {
+            log.Info("checkers have added too many tasks, truncate the subsequent tasks",
                 zap.Int("taskNum", len(tasks)),
                 zap.Int("taskNumLimit", checkRoundTaskNumLimit))
-            tasks = tasks[:checkRoundTaskNumLimit]
             break
         }
     }
-
-    for _, task := range tasks {
-        controller.scheduler.Add(task)
-    }
 }
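The reworked check flow separates spawning from admission: tasks are first collected from every checker, and the per-round cap now counts only tasks the scheduler actually accepts, so rejected tasks no longer eat the budget. A minimal standalone sketch of that pattern (the toyScheduler type, integer task IDs, and limit value are illustrative stand-ins, not Milvus APIs):

package main

import "fmt"

// toyScheduler stands in for the QueryCoord task scheduler; Add may
// reject a task (e.g. a duplicate), returning an error.
type toyScheduler struct{ seen map[int]bool }

func (s *toyScheduler) Add(id int) error {
    if s.seen[id] {
        return fmt.Errorf("task %d already scheduled", id)
    }
    s.seen[id] = true
    return nil
}

func main() {
    const limit = 3 // stand-in for checkRoundTaskNumLimit
    tasks := []int{1, 1, 2, 2, 3, 4, 5}

    s := &toyScheduler{seen: map[int]bool{}}
    added := 0
    for _, t := range tasks {
        if err := s.Add(t); err != nil {
            continue // rejected tasks no longer consume the round's budget
        }
        added++
        if added >= limit {
            fmt.Printf("truncating after %d accepted tasks\n", added)
            break
        }
    }
}

Counting successful Adds rather than spawned tasks matters here: duplicates rejected by the scheduler used to count toward the old limit-by-length check, which could truncate a round that had barely scheduled anything.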
@@ -19,7 +19,7 @@ const (
     TaskTypeReduce
     TaskTypeMove

-    taskPoolSize = 128
+    taskPoolSize = 256
 )

 var (
@@ -38,6 +38,7 @@ import (

 type task interface {
     ID() UniqueID // return ReqID
+    Ctx() context.Context
     Timestamp() Timestamp
     PreExecute(ctx context.Context) error
     Execute(ctx context.Context) error
@@ -539,6 +540,12 @@ func (l *loadSegmentsTask) PreExecute(ctx context.Context) error {
 func (l *loadSegmentsTask) Execute(ctx context.Context) error {
     log.Info("LoadSegmentTask Execute start", zap.Int64("msgID", l.req.Base.MsgID))

+    if len(l.req.Infos) == 0 {
+        log.Info("all segments loaded",
+            zap.Int64("msgID", l.req.GetBase().GetMsgID()))
+        return nil
+    }
+
 segmentIDs := lo.Map(l.req.Infos, func(info *queryPb.SegmentLoadInfo, idx int) UniqueID { return info.SegmentID })
     l.node.metaReplica.addSegmentsLoadingList(segmentIDs)
     defer l.node.metaReplica.removeSegmentsLoadingList(segmentIDs)
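The new guard returns early when the load request carries no segment infos; otherwise the segment IDs are projected out with lo.Map from github.com/samber/lo, as in the diff above. A small self-contained example of that projection (segmentLoadInfo here is a pared-down stand-in for querypb.SegmentLoadInfo):

package main

import (
    "fmt"

    "github.com/samber/lo"
)

// stand-in for querypb.SegmentLoadInfo, reduced to the one field used
type segmentLoadInfo struct {
    SegmentID int64
}

func main() {
    infos := []*segmentLoadInfo{{SegmentID: 10}, {SegmentID: 11}}

    // lo.Map applies the callback to every element, collecting the results;
    // the second parameter is the index, unused here
    segmentIDs := lo.Map(infos, func(info *segmentLoadInfo, _ int) int64 {
        return info.SegmentID
    })
    fmt.Println(segmentIDs) // [10 11]
}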
@@ -123,7 +123,13 @@ func (s *taskScheduler) taskLoop() {
         case <-s.queue.utChan():
             if !s.queue.utEmpty() {
                 t := s.queue.PopUnissuedTask()
-                s.processTask(t, s.queue)
+                select {
+                case <-t.Ctx().Done():
+                    t.Notify(context.Canceled)
+                    continue
+                default:
+                    s.processTask(t, s.queue)
+                }
             }
         }
     }
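This hunk is what the commit title refers to: before a popped task is executed, a non-blocking select on the task's context (via the Ctx() method added to the task interface above) notifies the waiter with context.Canceled and skips execution if the caller has already given up. The pattern in isolation, as a hedged sketch with a toy task type whose Notify stands in for the real result channel:

package main

import (
    "context"
    "fmt"
)

// toyTask carries its own context, like the querynode task interface
// after Ctx() was added to it.
type toyTask struct {
    ctx  context.Context
    name string
}

func (t *toyTask) Ctx() context.Context { return t.ctx }

// Notify stands in for reporting the task's outcome to its waiter.
func (t *toyTask) Notify(err error) { fmt.Println(t.name, "->", err) }

func process(t *toyTask) {
    // Non-blocking check: a canceled task is notified and skipped
    // instead of being executed.
    select {
    case <-t.Ctx().Done():
        t.Notify(context.Canceled)
        return
    default:
    }
    fmt.Println("processing", t.name)
}

func main() {
    live := &toyTask{ctx: context.Background(), name: "live"}

    canceledCtx, cancel := context.WithCancel(context.Background())
    cancel() // the waiter gave up before the task was popped
    dead := &toyTask{ctx: canceledCtx, name: "canceled"}

    process(live) // processing live
    process(dead) // canceled -> context canceled
}

The default branch makes the select non-blocking, so a live task pays only a channel poll; without it, the scheduler would stall on tasks whose contexts never finish.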
@@ -661,7 +661,7 @@ func (p *queryCoordConfig) initTaskRetryInterval() {
 }

 func (p *queryCoordConfig) initTaskMergeCap() {
-    p.TaskMergeCap = p.Base.ParseInt32WithDefault("queryCoord.taskMergeCap", 2)
+    p.TaskMergeCap = p.Base.ParseInt32WithDefault("queryCoord.taskMergeCap", 8)
 }

 func (p *queryCoordConfig) initAutoHandoff() {