Mirror of https://gitee.com/milvus-io/milvus.git, synced 2026-01-07 19:31:51 +08:00
If the collection TTL property is malformed (e.g., a non-numeric value), compaction tasks would fail silently and get stuck. This change:

- Add centralized GetCollectionTTL/GetCollectionTTLFromMap functions in pkg/common to handle TTL parsing with proper error handling
- Validate the TTL property in createCollectionTask and alterCollectionTask PreExecute to reject invalid values early
- Refactor datacoord compaction policies to use the new common functions
- Remove the duplicated getCollectionTTL from datacoord/util.go

issue: #46716

Release notes (auto-generated by coderabbit.ai):

- Core invariant: collection.ttl.seconds must be a parseable int64 and is validated at collection creation/alter time, so malformed TTLs never reach compaction/execution code paths.
- Bug fix (resolves #46716): malformed/non-numeric TTLs could silently cause compaction tasks to fail or stall; fixed by adding centralized parsing helpers pkg/common.GetCollectionTTL and GetCollectionTTLFromMap and validating TTL in createCollectionTask.PreExecute and alterCollectionTask.PreExecute (calls use a default of -1 and return parameter-invalid errors on parse failure).
- Simplification / removed redundancy: eliminated the duplicated getCollectionTTL in internal/datacoord/util.go and replaced ad-hoc TTL parsing across datacoord (compaction policies, import_util, compaction triggers) and the proxy util with the common helpers, centralizing error handling and defaulting logic.
- No data loss or behavior regression: valid TTL parsing semantics are unchanged (the helpers use identical int64 parsing and the default fallback from paramtable/CommonCfg); validation occurs in PreExecute, so existing valid collections proceed unchanged while malformed values are rejected early. Compaction code paths now receive only validated TTL values (or explicit defaults), preventing silent skips without altering valid execution flows.

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
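For illustration, a minimal sketch of what the centralized helper described above could look like; the function name and property key follow the PR description, but the exact signatures and error wrapping in pkg/common may differ:

```go
package common

import (
	"fmt"
	"strconv"
)

// CollectionTTLConfigKey is the collection property carrying the TTL in seconds
// (assumed to be "collection.ttl.seconds", as stated in the release notes).
const CollectionTTLConfigKey = "collection.ttl.seconds"

// GetCollectionTTLFromMap parses the TTL property from collection properties.
// It returns defaultTTL when the property is absent and an error when the value
// is not a valid int64, so callers can reject malformed TTLs early instead of
// letting compaction tasks fail silently later.
func GetCollectionTTLFromMap(properties map[string]string, defaultTTL int64) (int64, error) {
	v, ok := properties[CollectionTTLConfigKey]
	if !ok {
		return defaultTTL, nil
	}
	ttl, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid %s value %q: %w", CollectionTTLConfigKey, v, err)
	}
	return ttl, nil
}
```

A PreExecute-style caller could pass -1 as the default (as the release notes describe) and convert a parse error into a parameter-invalid error, so invalid TTLs are rejected at create/alter time rather than surfacing as stuck compaction tasks.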
414 lines · 12 KiB · Go
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datacoord

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/samber/lo"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	"github.com/milvus-io/milvus/internal/util/vecindexmgr"
	"github.com/milvus-io/milvus/pkg/v2/common"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/metrics"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/proto/indexpb"
	"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
	"github.com/milvus-io/milvus/pkg/v2/util/tsoutil"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

// Response response interface for verification
type Response interface {
	GetStatus() *commonpb.Status
}

// VerifyResponse verify grpc Response 1. check error is nil 2. check response.GetStatus() with status success
func VerifyResponse(response interface{}, err error) error {
	if err != nil {
		return err
	}
	if response == nil {
		return errNilResponse
	}
	switch resp := response.(type) {
	case Response:
		// note that resp will not be nil here, since it's still an interface
		if resp.GetStatus() == nil {
			return errNilStatusResponse
		}
		return merr.Error(resp.GetStatus())

	case *commonpb.Status:
		if resp == nil {
			return errNilResponse
		}
		return merr.Error(resp)
	default:
		return errUnknownResponseType
	}
}

func FilterInIndexedSegments(ctx context.Context, handler Handler, mt *meta, skipNoIndexCollection bool, segments ...*SegmentInfo) []*SegmentInfo {
	if len(segments) == 0 {
		return nil
	}

	if ctx.Err() != nil {
		return nil
	}

	collectionSegments := lo.GroupBy(segments, func(segment *SegmentInfo) int64 {
		return segment.GetCollectionID()
	})

	ret := make([]*SegmentInfo, 0)
	for collection, segmentList := range collectionSegments {
		// No segments will be filtered if there are no indices in the collection.
		if skipNoIndexCollection && !mt.indexMeta.HasIndex(collection) {
			ret = append(ret, segmentList...)
			continue
		}

		timeoutCtx, cancel := context.WithTimeout(ctx, time.Second*2)

		coll, err := handler.GetCollection(timeoutCtx, collection)
		cancel()
		if err != nil {
			log.Warn("failed to get collection schema", zap.Error(err))
			continue
		}

		// get vector field id
		var targetFieldIds []int64
		// wait all vector datatype fields only
		for _, field := range coll.Schema.GetFields() {
			if typeutil.IsVectorType(field.GetDataType()) {
				targetFieldIds = append(targetFieldIds, field.GetFieldID())
			}
		}

		// include all scalar fields with index
		if paramtable.Get().DataCoordCfg.DVForceAllIndexReady.GetAsBool() {
			indices := mt.indexMeta.GetIndexesForCollection(collection, "")
			for _, index := range indices {
				targetFieldIds = append(targetFieldIds, index.FieldID)
			}
		}
		segmentIDs := lo.Map(segmentList, func(seg *SegmentInfo, _ int) UniqueID {
			return seg.GetID()
		})

		// get indexed segments which finish build index on all vector field
		indexed := mt.indexMeta.GetIndexedSegments(collection, segmentIDs, targetFieldIds)
		if len(indexed) > 0 {
			indexedSet := typeutil.NewUniqueSet(indexed...)
			for _, segment := range segmentList {
				if !isFlushState(segment.GetState()) && segment.GetState() != commonpb.SegmentState_Dropped {
					continue
				}

				if indexedSet.Contain(segment.GetID()) {
					ret = append(ret, segment)
				}
			}
		}
	}

	return ret
}

func getZeroTime() time.Time {
	var t time.Time
	return t
}

func UpdateCompactionSegmentSizeMetrics(segments []*datapb.CompactionSegment) {
	var totalSize int64
	for _, seg := range segments {
		totalSize += getCompactedSegmentSize(seg)
	}
	// observe size in bytes
	metrics.DataCoordCompactedSegmentSize.WithLabelValues().Observe(float64(totalSize))
}

func getCompactedSegmentSize(s *datapb.CompactionSegment) int64 {
	var segmentSize int64
	if s != nil {
		for _, binlogs := range s.GetInsertLogs() {
			for _, l := range binlogs.GetBinlogs() {
				segmentSize += l.GetMemorySize()
			}
		}

		for _, deltaLogs := range s.GetDeltalogs() {
			for _, l := range deltaLogs.GetBinlogs() {
				segmentSize += l.GetMemorySize()
			}
		}

		for _, statsLogs := range s.GetField2StatslogPaths() {
			for _, l := range statsLogs.GetBinlogs() {
				segmentSize += l.GetMemorySize()
			}
		}
	}

	return segmentSize
}

// getCollectionAutoCompactionEnabled returns whether auto compaction for collection is enabled.
// if not set, returns global auto compaction config.
func getCollectionAutoCompactionEnabled(properties map[string]string) (bool, error) {
	// when collection is on truncating, disable auto compaction.
	if _, ok := properties[common.CollectionOnTruncatingKey]; ok {
		return false, nil
	}
	v, ok := properties[common.CollectionAutoCompactionKey]
	if ok {
		enabled, err := strconv.ParseBool(v)
		if err != nil {
			return false, err
		}
		return enabled, nil
	}
	return Params.DataCoordCfg.EnableAutoCompaction.GetAsBool(), nil
}

func GetIndexType(indexParams []*commonpb.KeyValuePair) string {
	for _, param := range indexParams {
		if param.Key == common.IndexTypeKey {
			return param.Value
		}
	}
	return invalidIndex
}

func isNoTrainIndex(indexType string) bool {
	return vecindexmgr.GetVecIndexMgrInstance().IsNoTrainIndex(indexType)
}

func isMvSupported(indexType string) bool {
	return vecindexmgr.GetVecIndexMgrInstance().IsMvSupported(indexType)
}

func isDiskANNIndex(indexType string) bool {
	return vecindexmgr.GetVecIndexMgrInstance().IsDiskANN(indexType)
}

func parseBuildIDFromFilePath(key string) (UniqueID, error) {
	ss := strings.Split(key, "/")
	if strings.HasSuffix(key, "/") {
		return strconv.ParseInt(ss[len(ss)-2], 10, 64)
	}
	return strconv.ParseInt(ss[len(ss)-1], 10, 64)
}

func getFieldBinlogs(id UniqueID, binlogs []*datapb.FieldBinlog) *datapb.FieldBinlog {
	for _, binlog := range binlogs {
		if id == binlog.GetFieldID() {
			return binlog
		}
	}
	return nil
}

func mergeFieldBinlogs(currentBinlogs []*datapb.FieldBinlog, newBinlogs []*datapb.FieldBinlog) []*datapb.FieldBinlog {
	for _, newBinlog := range newBinlogs {
		fieldBinlogs := getFieldBinlogs(newBinlog.GetFieldID(), currentBinlogs)
		if fieldBinlogs == nil {
			currentBinlogs = append(currentBinlogs, newBinlog)
		} else {
			fieldBinlogs.Binlogs = append(fieldBinlogs.Binlogs, newBinlog.Binlogs...)
		}
	}
	return currentBinlogs
}

func calculateL0SegmentSize(fields []*datapb.FieldBinlog) float64 {
	size := int64(0)
	for _, field := range fields {
		for _, binlog := range field.GetBinlogs() {
			size += binlog.GetMemorySize()
		}
	}
	return float64(size)
}

func getCompactionMergeInfo(task *datapb.CompactionTask) *milvuspb.CompactionMergeInfo {
	/*
		segments := task.GetPlan().GetSegmentBinlogs()
		var sources []int64
		for _, s := range segments {
			sources = append(sources, s.GetSegmentID())
		}
	*/
	var target int64 = -1
	if len(task.GetResultSegments()) > 0 {
		target = task.GetResultSegments()[0]
	}
	return &milvuspb.CompactionMergeInfo{
		Sources: task.GetInputSegments(),
		Target:  target,
	}
}

func getBinLogIDs(segment *SegmentInfo, fieldID int64) []int64 {
	binlogIDs := make([]int64, 0)
	for _, fieldBinLog := range segment.GetBinlogs() {
		if fieldBinLog.GetFieldID() == fieldID {
			for _, binLog := range fieldBinLog.GetBinlogs() {
				binlogIDs = append(binlogIDs, binLog.GetLogID())
			}
			break
		}
	}
	return binlogIDs
}

func getTotalBinlogRows(segment *SegmentInfo, fieldID int64) int64 {
	var total int64
	for _, fieldBinLog := range segment.GetBinlogs() {
		if fieldBinLog.GetFieldID() == fieldID {
			for _, binLog := range fieldBinLog.GetBinlogs() {
				total += binLog.EntriesNum
			}
		}
	}
	return total
}

func CheckCheckPointsHealth(meta *meta) error {
	for channel, cp := range meta.GetChannelCheckpoints() {
		collectionID := funcutil.GetCollectionIDFromVChannel(channel)
		if collectionID == -1 {
			log.RatedWarn(60, "can't parse collection id from vchannel, skip check cp lag", zap.String("vchannel", channel))
			continue
		}
		if meta.GetCollection(collectionID) == nil {
			log.RatedWarn(60, "corresponding the collection doesn't exists, skip check cp lag", zap.String("vchannel", channel))
			continue
		}
		ts, _ := tsoutil.ParseTS(cp.Timestamp)
		lag := time.Since(ts)
		if lag > paramtable.Get().DataCoordCfg.ChannelCheckpointMaxLag.GetAsDuration(time.Second) {
			return merr.WrapErrChannelCPExceededMaxLag(channel, fmt.Sprintf("checkpoint lag: %f(min)", lag.Minutes()))
		}
	}
	return nil
}

func createStorageConfig() *indexpb.StorageConfig {
	var storageConfig *indexpb.StorageConfig

	if Params.CommonCfg.StorageType.GetValue() == "local" {
		storageConfig = &indexpb.StorageConfig{
			RootPath:    Params.LocalStorageCfg.Path.GetValue(),
			StorageType: Params.CommonCfg.StorageType.GetValue(),
		}
	} else {
		storageConfig = &indexpb.StorageConfig{
			Address:           Params.MinioCfg.Address.GetValue(),
			AccessKeyID:       Params.MinioCfg.AccessKeyID.GetValue(),
			SecretAccessKey:   Params.MinioCfg.SecretAccessKey.GetValue(),
			UseSSL:            Params.MinioCfg.UseSSL.GetAsBool(),
			SslCACert:         Params.MinioCfg.SslCACert.GetValue(),
			BucketName:        Params.MinioCfg.BucketName.GetValue(),
			RootPath:          Params.MinioCfg.RootPath.GetValue(),
			UseIAM:            Params.MinioCfg.UseIAM.GetAsBool(),
			IAMEndpoint:       Params.MinioCfg.IAMEndpoint.GetValue(),
			StorageType:       Params.CommonCfg.StorageType.GetValue(),
			Region:            Params.MinioCfg.Region.GetValue(),
			UseVirtualHost:    Params.MinioCfg.UseVirtualHost.GetAsBool(),
			CloudProvider:     Params.MinioCfg.CloudProvider.GetValue(),
			RequestTimeoutMs:  Params.MinioCfg.RequestTimeoutMs.GetAsInt64(),
			GcpCredentialJSON: Params.MinioCfg.GcpCredentialJSON.GetValue(),
		}
	}

	return storageConfig
}

func getSortStatus(sorted bool) string {
	if sorted {
		return "sorted"
	}
	return "unsorted"
}

func calculateIndexTaskSlot(fieldSize int64, isVectorIndex bool) int64 {
	defaultSlots := Params.DataCoordCfg.IndexTaskSlotUsage.GetAsInt64()
	if !isVectorIndex {
		defaultSlots = Params.DataCoordCfg.ScalarIndexTaskSlotUsage.GetAsInt64()
	}
	if fieldSize > 512*1024*1024 {
		taskSlot := max(fieldSize/512/1024/1024, 1) * defaultSlots
		return max(taskSlot, 1)
	} else if fieldSize > 100*1024*1024 {
		return max(defaultSlots/4, 1)
	} else if fieldSize > 10*1024*1024 {
		return max(defaultSlots/16, 1)
	}
	return max(defaultSlots/64, 1)
}

func calculateStatsTaskSlot(segmentSize int64) int64 {
	defaultSlots := Params.DataCoordCfg.StatsTaskSlotUsage.GetAsInt64()
	if segmentSize > 512*1024*1024 {
		taskSlot := max(segmentSize/512/1024/1024, 1) * defaultSlots
		return max(taskSlot, 1)
	} else if segmentSize > 100*1024*1024 {
		return max(defaultSlots/2, 1)
	} else if segmentSize > 10*1024*1024 {
		return max(defaultSlots/4, 1)
	}
	return max(defaultSlots/8, 1)
}

func enableSortCompaction() bool {
	return paramtable.Get().DataCoordCfg.EnableSortCompaction.GetAsBool() && paramtable.Get().DataCoordCfg.EnableCompaction.GetAsBool()
}

// stringifyBinlogs is used for logging, it's not used for other purposes.
func stringifyBinlogs(binlogs []*datapb.FieldBinlog) []string {
	strs := make([]string, 0, len(binlogs))
	byIDs := lo.GroupBy(binlogs, func(binlog *datapb.FieldBinlog) int64 {
		return binlog.GetFieldID()
	})
	for _, binlogs := range byIDs {
		fieldsStrs := make([]string, 0, len(binlogs))
		for _, binlog := range binlogs {
			for _, b := range binlog.GetBinlogs() {
				fieldsStrs = append(fieldsStrs,
					fmt.Sprintf("l%d(e%d,m%d,t%d-%d)", b.LogID, b.EntriesNum, b.MemorySize, b.TimestampFrom, b.TimestampTo),
				)
			}
		}
		strs = append(strs, fmt.Sprintf("f%d:%s", binlogs[0].GetFieldID(), strings.Join(fieldsStrs, "|")))
	}
	return strs
}