Mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-06 17:18:35 +08:00
issue: https://github.com/milvus-io/milvus/issues/41690

This commit implements partial search result functionality when query nodes go down, improving system availability during node failures. The changes include:

- Enhanced load balancing in proxy (lb_policy.go) to handle node failures with retry support
- Added partial search result capability in querynode delegator and distribution logic
- Implemented tests for various partial result scenarios when nodes go down
- Added metrics to track partial search results in querynode_metrics.go
- Updated parameter configuration to support partial result required data ratio
- Replaced old partial_search_test.go with more comprehensive partial_result_on_node_down_test.go
- Updated proto definitions and improved retry logic

These changes improve query resilience by returning partial results to users when some query nodes are unavailable, ensuring that queries don't completely fail when a portion of data remains accessible.

---------

Signed-off-by: Wei Liu <wei.liu@zilliz.com>
226 lines
5.6 KiB
Go
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package retry

import (
	"context"
	"runtime"
	"strconv"
	"time"

	"github.com/cockroachdb/errors"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
)

// getCaller returns the file:line of the caller skip frames up the stack,
// or "unknown" if the information is unavailable.
func getCaller(skip int) string {
	_, file, line, ok := runtime.Caller(skip)
	if !ok {
		return "unknown"
	}
	return file + ":" + strconv.Itoa(line)
}

// Do runs fn with a retry mechanism.
// fn is the function to run.
// Options control the retry attempts, backoff, and timeout.
func Do(ctx context.Context, fn func() error, opts ...Option) error {
	if !funcutil.CheckCtxValid(ctx) {
		return ctx.Err()
	}

	log := log.Ctx(ctx)
	c := newDefaultConfig()

	for _, opt := range opts {
		opt(c)
	}

	var lastErr error

	// attempts == 0 means retry indefinitely (until the context expires).
	for i := uint(0); c.attempts == 0 || i < c.attempts; i++ {
		if err := fn(); err != nil {
			// Throttle logging to every fourth attempt.
			if i%4 == 0 {
				log.Warn("retry func failed",
					zap.Uint("retried", i),
					zap.Error(err),
					zap.String("caller", getCaller(2)))
			}

			if !IsRecoverable(err) {
				isContextErr := errors.IsAny(err, context.Canceled, context.DeadlineExceeded)
				log.Warn("retry func failed, not recoverable",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.Bool("isContextErr", isContextErr),
					zap.String("caller", getCaller(2)),
				)
				// Prefer the last business error over a bare context error.
				if isContextErr && lastErr != nil {
					return lastErr
				}
				return err
			}
			if c.isRetryErr != nil && !c.isRetryErr(err) {
				log.Warn("retry func failed, not retryable",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.String("caller", getCaller(2)),
				)
				return err
			}

			// Give up early if the time remaining before the deadline is
			// shorter than the next backoff sleep.
			deadline, ok := ctx.Deadline()
			if ok && time.Until(deadline) < c.sleep {
				isContextErr := errors.IsAny(err, context.Canceled, context.DeadlineExceeded)
				log.Warn("retry func failed, deadline",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.Bool("isContextErr", isContextErr),
					zap.String("caller", getCaller(2)),
				)
				if isContextErr && lastErr != nil {
					return lastErr
				}
				return err
			}

			lastErr = err

			select {
			case <-time.After(c.sleep):
			case <-ctx.Done():
				log.Warn("retry func failed, ctx done",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.String("caller", getCaller(2)),
				)
				return lastErr
			}

			// Exponential backoff, capped at maxSleepTime.
			c.sleep *= 2
			if c.sleep > c.maxSleepTime {
				c.sleep = c.maxSleepTime
			}
		} else {
			return nil
		}
	}
	if lastErr != nil {
		log.Warn("retry func failed, reach max retry",
			zap.Uint("attempt", c.attempts),
		)
	}
	return lastErr
}

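// A minimal usage sketch for Do (illustrative, not part of this file):
// retry a flaky call with bounded attempts and a fixed initial backoff.
// Attempts and Sleep are assumed to be Option constructors defined elsewhere
// in this package; flakyCall is a hypothetical fallible call.
//
//	err := Do(ctx, func() error {
//	    return flakyCall(ctx)
//	}, Attempts(5), Sleep(100*time.Millisecond))
//	if err != nil {
//	    // All attempts failed, or an unrecoverable error short-circuited.
//	}
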
// Handle runs fn with a retry mechanism.
// fn is the function to run; it returns a shouldRetry flag and an error.
// Options control the retry attempts, backoff, and timeout.
func Handle(ctx context.Context, fn func() (bool, error), opts ...Option) error {
	if !funcutil.CheckCtxValid(ctx) {
		return ctx.Err()
	}

	log := log.Ctx(ctx)
	c := newDefaultConfig()

	for _, opt := range opts {
		opt(c)
	}

	var lastErr error
	// Note: unlike Do, attempts == 0 runs zero attempts rather than retrying forever.
	for i := uint(0); i < c.attempts; i++ {
		if shouldRetry, err := fn(); err != nil {
			// Throttle logging to every fourth attempt.
			if i%4 == 0 {
				log.Warn("retry func failed",
					zap.Uint("retried", i),
					zap.String("caller", getCaller(2)),
					zap.Error(err),
				)
			}

			if !shouldRetry {
				isContextErr := errors.IsAny(err, context.Canceled, context.DeadlineExceeded)
				log.Warn("retry func failed, not recoverable",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.Bool("isContextErr", isContextErr),
					zap.String("caller", getCaller(2)),
				)
				// Prefer the last business error over a bare context error.
				if isContextErr && lastErr != nil {
					return lastErr
				}
				return err
			}

			// Give up early if the time remaining before the deadline is
			// shorter than the next backoff sleep.
			deadline, ok := ctx.Deadline()
			if ok && time.Until(deadline) < c.sleep {
				isContextErr := errors.IsAny(err, context.Canceled, context.DeadlineExceeded)
				log.Warn("retry func failed, deadline",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.Bool("isContextErr", isContextErr),
					zap.String("caller", getCaller(2)),
				)
				if isContextErr && lastErr != nil {
					return lastErr
				}
				return err
			}

			lastErr = err

			select {
			case <-time.After(c.sleep):
			case <-ctx.Done():
				log.Warn("retry func failed, ctx done",
					zap.Uint("retried", i),
					zap.Uint("attempt", c.attempts),
					zap.String("caller", getCaller(2)),
				)
				return lastErr
			}

			// Exponential backoff, capped at maxSleepTime.
			c.sleep *= 2
			if c.sleep > c.maxSleepTime {
				c.sleep = c.maxSleepTime
			}
		} else {
			return nil
		}
	}
	if lastErr != nil {
		log.Warn("retry func failed, reach max retry",
			zap.Uint("attempt", c.attempts),
			zap.String("caller", getCaller(2)),
		)
	}
	return lastErr
}

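// A minimal usage sketch for Handle (illustrative, not part of this file):
// the callee decides per error whether a retry is worthwhile via the bool
// return. send and isTransient are hypothetical helpers.
//
//	err := Handle(ctx, func() (bool, error) {
//	    resp, err := send(ctx)
//	    if err != nil {
//	        return isTransient(err), err // retry only transient failures
//	    }
//	    _ = resp
//	    return false, nil
//	}, Attempts(10))
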
// errUnrecoverable is the sentinel error used to mark an error as unrecoverable.
var errUnrecoverable = errors.New("unrecoverable error")

// Unrecoverable wraps an error into an unrecoverable error. This makes retry
// return quickly instead of exhausting the remaining attempts.
func Unrecoverable(err error) error {
	return merr.Combine(err, errUnrecoverable)
}

// IsRecoverable reports whether err has not been wrapped by Unrecoverable.
func IsRecoverable(err error) bool {
	return !errors.Is(err, errUnrecoverable)
}
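
// A minimal usage sketch for Unrecoverable (illustrative, not part of this
// file): abort retries as soon as a permanent failure is detected.
// validate and submit are hypothetical helpers.
//
//	err := Do(ctx, func() error {
//	    if err := validate(req); err != nil {
//	        return Unrecoverable(err) // permanent: stop retrying immediately
//	    }
//	    return submit(ctx, req)
//	})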