issue: #46550

- Add a CatchUpStreamingDataTsLag parameter to control the tolerable lag threshold for a delegator to be considered caught up
- Add a catchingUpStreamingData field to the delegator to track whether it has caught up with streaming data
- Add a catching_up_streaming_data field to the LeaderViewStatus proto
- Check the catching-up status in CheckDelegatorDataReady and return not-ready while the delegator is still catching up on streaming data
- Add unit tests for the new functionality

When the tsafe lag exceeds the threshold, the distribution is not considered serviceable, which prevents queries from timing out in waitTSafe. This is useful when streaming message queue consumption is slow.

Release notes:

- Core invariant: a delegator must not be considered serviceable while its tsafe lags behind the latest committed timestamp beyond a configurable tolerance; a delegator is "caught up" only when (latestTsafe - delegator.GetTSafe()) < CatchUpStreamingDataTsLag (configured by queryNode.delegator.catchUpStreamingDataTsLag, default 1s).
- New capability and where it takes effect: adds streaming-catchup tracking to QueryNode/QueryCoord, consisting of an atomic catchingUpStreamingData flag on shardDelegator (internal/querynodev2/delegator/delegator.go), a new param CatchUpStreamingDataTsLag (pkg/util/paramtable/component_param.go), and a LeaderViewStatus.catching_up_streaming_data field in the proto (pkg/proto/query_coord.proto). The flag is exposed in GetDataDistribution (internal/querynodev2/services.go) and used by QueryCoord readiness checks (internal/querycoordv2/utils/util.go::CheckDelegatorDataReady) to reject leaders that are still catching up.
- What logic is added (nothing removed): instead of relying solely on segment distribution and worker heartbeats, the PR adds an explicit readiness gate that returns "not available" when the delegator reports that it is catching up on streaming data. This is strictly additive; no existing checks are removed, and the new precondition runs before segment availability validation to prevent premature routing to slow-consuming delegators.
- Why this does not cause data loss or regress behavior: the change only controls serviceability visibility and routing; it never drops or mutates data. Concretely, shardDelegator starts with catchingUpStreamingData=true and flips it to false in UpdateTSafe once the sampled lag falls below the configured threshold (internal/querynodev2/delegator/delegator.go::UpdateTSafe). QueryCoord short-circuits in CheckDelegatorDataReady when leader.Status.GetCatchingUpStreamingData() is true (internal/querycoordv2/utils/util.go), returning a channel-not-available error before any segment checks; when the flag clears, the existing segment-distribution checks resume on the same code paths. The added tests cover both the catching-up and caught-up paths (internal/querynodev2/delegator/delegator_test.go, internal/querycoordv2/utils/util_test.go, internal/querynodev2/services_test.go), demonstrating convergence without changed data flows or deletion of data.

Signed-off-by: Wei Liu <wei.liu@zilliz.com>
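To make the mechanism concrete, here is a minimal, self-contained sketch of the gate described above. The types, helper names, and millisecond-based timestamps are illustrative stand-ins, not the actual Milvus signatures; the real logic lives in shardDelegator.UpdateTSafe and CheckDelegatorDataReady.

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// delegator is a stand-in for shardDelegator, reduced to the catch-up state.
type delegator struct {
	catchingUpStreamingData atomic.Bool
	tsafe                   atomic.Uint64 // applied timestamp, in milliseconds here for simplicity
}

func newDelegator() *delegator {
	d := &delegator{}
	// A fresh delegator starts in the catching-up state.
	d.catchingUpStreamingData.Store(true)
	return d
}

// UpdateTSafe records the new tsafe and clears the catching-up flag once the
// sampled lag drops below the tolerance (queryNode.delegator.catchUpStreamingDataTsLag,
// 1s by default per the description above). Assumes tsafe <= latestCommittedTs.
func (d *delegator) UpdateTSafe(tsafe, latestCommittedTs uint64, tolerance time.Duration) {
	d.tsafe.Store(tsafe)
	lag := time.Duration(latestCommittedTs-tsafe) * time.Millisecond
	if lag < tolerance {
		d.catchingUpStreamingData.Store(false)
	}
}

// checkDelegatorDataReady mirrors the short-circuit in CheckDelegatorDataReady:
// reject the leader before any segment-distribution checks run.
func checkDelegatorDataReady(d *delegator) error {
	if d.catchingUpStreamingData.Load() {
		return errors.New("channel not available: delegator is catching up streaming data")
	}
	// The existing segment and target-version checks would follow here, unchanged.
	return nil
}

func main() {
	d := newDelegator()
	fmt.Println(checkDelegatorDataReady(d)) // error: still catching up

	d.UpdateTSafe(10_000, 12_000, time.Second) // 2s lag >= 1s tolerance: flag stays set
	fmt.Println(checkDelegatorDataReady(d))    // still an error

	d.UpdateTSafe(11_500, 12_000, time.Second) // 500ms lag < 1s tolerance: caught up
	fmt.Println(checkDelegatorDataReady(d))    // <nil>: normal checks resume
}
```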
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import (
	"testing"
	"time"

	"github.com/blang/semver/v4"
	"github.com/bytedance/mockey"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/suite"

	"github.com/milvus-io/milvus/internal/querycoordv2/meta"
	"github.com/milvus-io/milvus/internal/querycoordv2/session"
	"github.com/milvus-io/milvus/pkg/v2/proto/datapb"
	"github.com/milvus-io/milvus/pkg/v2/proto/querypb"
)

type UtilTestSuite struct {
	suite.Suite
	nodeMgr *session.NodeManager
}

func (suite *UtilTestSuite) SetupTest() {
	suite.nodeMgr = session.NewNodeManager()
}

func (suite *UtilTestSuite) setNodeAvailable(nodes ...int64) {
	for _, node := range nodes {
		nodeInfo := session.NewNodeInfo(session.ImmutableNodeInfo{
			NodeID:   node,
			Address:  "",
			Hostname: "localhost",
		})
		nodeInfo.SetLastHeartbeat(time.Now())
		suite.nodeMgr.Add(nodeInfo)
	}
}

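// TestCheckLeaderAvaliable covers the happy path: the delegator (node 1) and
// the shard worker (node 2) are alive, the leader view holds every sealed
// segment of the current target, and the target version matches.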
func (suite *UtilTestSuite) TestCheckLeaderAvaliable() {
	leadview := &meta.LeaderView{
		ID:            1,
		Channel:       "test",
		Segments:      map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
		TargetVersion: 1011,
	}

	mockTargetManager := meta.NewMockTargetManager(suite.T())
	mockTargetManager.EXPECT().GetSealedSegmentsByChannel(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[int64]*datapb.SegmentInfo{
		2: {
			ID:            2,
			InsertChannel: "test",
		},
	}).Maybe()
	mockTargetManager.EXPECT().GetCollectionTargetVersion(mock.Anything, mock.Anything, mock.Anything).Return(1011).Maybe()

	suite.setNodeAvailable(1, 2)
	err := CheckDelegatorDataReady(suite.nodeMgr, mockTargetManager, leadview, meta.CurrentTarget)
	suite.NoError(err)
}

func (suite *UtilTestSuite) TestCheckLeaderAvaliableFailed() {
	suite.Run("leader not available", func() {
		leadview := &meta.LeaderView{
			ID:            1,
			Channel:       "test",
			Segments:      map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
			TargetVersion: 1011,
		}
		mockTargetManager := meta.NewMockTargetManager(suite.T())
		mockTargetManager.EXPECT().GetSealedSegmentsByChannel(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[int64]*datapb.SegmentInfo{
			2: {
				ID:            2,
				InsertChannel: "test",
			},
		}).Maybe()
		mockTargetManager.EXPECT().GetCollectionTargetVersion(mock.Anything, mock.Anything, mock.Anything).Return(1011).Maybe()
		// leader nodeID=1 not available
		suite.setNodeAvailable(2)
		err := CheckDelegatorDataReady(suite.nodeMgr, mockTargetManager, leadview, meta.CurrentTarget)
		suite.Error(err)
	})

suite.Run("shard worker not available", func() {
|
|
leadview := &meta.LeaderView{
|
|
ID: 11111,
|
|
Channel: "test",
|
|
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
|
|
TargetVersion: 1011,
|
|
}
|
|
|
|
mockTargetManager := meta.NewMockTargetManager(suite.T())
|
|
mockTargetManager.EXPECT().GetSealedSegmentsByChannel(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[int64]*datapb.SegmentInfo{
|
|
2: {
|
|
ID: 2,
|
|
InsertChannel: "test",
|
|
},
|
|
}).Maybe()
|
|
mockTargetManager.EXPECT().GetCollectionTargetVersion(mock.Anything, mock.Anything, mock.Anything).Return(1011).Maybe()
|
|
// leader nodeID=2 not available
|
|
suite.setNodeAvailable(1)
|
|
err := CheckDelegatorDataReady(suite.nodeMgr, mockTargetManager, leadview, meta.CurrentTarget)
|
|
suite.Error(err)
|
|
})
|
|
|
|
suite.Run("segment lacks", func() {
|
|
leadview := &meta.LeaderView{
|
|
ID: 1,
|
|
Channel: "test",
|
|
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
|
|
TargetVersion: 1011,
|
|
}
|
|
mockTargetManager := meta.NewMockTargetManager(suite.T())
|
|
mockTargetManager.EXPECT().GetSealedSegmentsByChannel(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[int64]*datapb.SegmentInfo{
|
|
// target segmentID=1 not in leadView
|
|
1: {
|
|
ID: 1,
|
|
InsertChannel: "test",
|
|
},
|
|
}).Maybe()
|
|
mockTargetManager.EXPECT().GetCollectionTargetVersion(mock.Anything, mock.Anything, mock.Anything).Return(1011).Maybe()
|
|
suite.setNodeAvailable(1, 2)
|
|
err := CheckDelegatorDataReady(suite.nodeMgr, mockTargetManager, leadview, meta.CurrentTarget)
|
|
suite.Error(err)
|
|
})
|
|
|
|
suite.Run("target version not synced", func() {
|
|
leadview := &meta.LeaderView{
|
|
ID: 1,
|
|
Channel: "test",
|
|
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
|
|
}
|
|
mockTargetManager := meta.NewMockTargetManager(suite.T())
|
|
mockTargetManager.EXPECT().GetSealedSegmentsByChannel(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[int64]*datapb.SegmentInfo{
|
|
// target segmentID=1 not in leadView
|
|
1: {
|
|
ID: 1,
|
|
InsertChannel: "test",
|
|
},
|
|
}).Maybe()
|
|
mockTargetManager.EXPECT().GetCollectionTargetVersion(mock.Anything, mock.Anything, mock.Anything).Return(1011).Maybe()
|
|
suite.setNodeAvailable(1, 2)
|
|
err := CheckDelegatorDataReady(suite.nodeMgr, mockTargetManager, leadview, meta.CurrentTarget)
|
|
suite.Error(err)
|
|
})
|
|
|
|
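	// The next two subtests exercise the catch-up gate added for issue #46550:
	// CheckDelegatorDataReady must reject a leader whose LeaderViewStatus reports
	// CatchingUpStreamingData, and must fall through to the normal segment checks
	// once the flag clears.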
suite.Run("catching up streaming data", func() {
|
|
leadview := &meta.LeaderView{
|
|
ID: 1,
|
|
Channel: "test",
|
|
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
|
|
TargetVersion: 1011,
|
|
Status: &querypb.LeaderViewStatus{
|
|
Serviceable: true,
|
|
CatchingUpStreamingData: true, // still catching up
|
|
},
|
|
}
|
|
// When catching up, function returns early without calling targetMgr
|
|
// so we can pass nil as targetMgr
|
|
suite.setNodeAvailable(1, 2)
|
|
err := CheckDelegatorDataReady(suite.nodeMgr, nil, leadview, meta.CurrentTarget)
|
|
suite.Error(err)
|
|
suite.Contains(err.Error(), "catching up streaming data")
|
|
})
|
|
|
|
suite.Run("caught up streaming data", func() {
|
|
leadview := &meta.LeaderView{
|
|
ID: 1,
|
|
Channel: "test",
|
|
Segments: map[int64]*querypb.SegmentDist{2: {NodeID: 2}},
|
|
TargetVersion: 1011,
|
|
Status: &querypb.LeaderViewStatus{
|
|
Serviceable: true,
|
|
CatchingUpStreamingData: false, // already caught up
|
|
},
|
|
}
|
|
// Use mockey to mock TargetManager.GetSealedSegmentsByChannel
|
|
targetMgr := &meta.TargetManager{}
|
|
mockGetSealedSegments := mockey.Mock(mockey.GetMethod(targetMgr, "GetSealedSegmentsByChannel")).
|
|
Return(map[int64]*datapb.SegmentInfo{
|
|
2: {
|
|
ID: 2,
|
|
InsertChannel: "test",
|
|
},
|
|
}).Build()
|
|
defer mockGetSealedSegments.UnPatch()
|
|
|
|
suite.setNodeAvailable(1, 2)
|
|
err := CheckDelegatorDataReady(suite.nodeMgr, targetMgr, leadview, meta.CurrentTarget)
|
|
suite.NoError(err)
|
|
})
|
|
}
|
|
|
|
func (suite *UtilTestSuite) TestGetChannelRWAndRONodesFor260() {
	nodes := []int64{1, 2, 3, 4, 5}
	nodeManager := session.NewNodeManager()
	r := meta.NewReplica(&querypb.Replica{
		Nodes: nodes,
	})
	rwNodes, roNodes := GetChannelRWAndRONodesFor260(r, nodeManager)
	suite.ElementsMatch(rwNodes, []int64{})
	suite.ElementsMatch(roNodes, []int64{1, 2, 3, 4, 5})

	nodeManager.Add(session.NewNodeInfo(session.ImmutableNodeInfo{
		NodeID:   1,
		Address:  "127.0.0.1:0",
		Hostname: "localhost",
		Version:  semver.MustParse("2.5.0"),
	}))
	nodeManager.Add(session.NewNodeInfo(session.ImmutableNodeInfo{
		NodeID:   2,
		Address:  "127.0.0.1:0",
		Hostname: "localhost",
		Version:  semver.MustParse("2.6.0-dev"),
	}))
	rwNodes, roNodes = GetChannelRWAndRONodesFor260(r, nodeManager)
	suite.ElementsMatch(rwNodes, []int64{})
	suite.ElementsMatch(roNodes, []int64{1, 3, 4, 5})
}

func (suite *UtilTestSuite) TestFilterOutNodeLessThan260() {
	nodes := []int64{1, 2, 3, 4, 5}
	nodeManager := session.NewNodeManager()
	filteredNodes := filterNodeLessThan260(nodes, nodeManager)
	suite.ElementsMatch(filteredNodes, []int64{1, 2, 3, 4, 5})

	nodeManager.Add(session.NewNodeInfo(session.ImmutableNodeInfo{
		NodeID:   1,
		Address:  "127.0.0.1:0",
		Hostname: "localhost",
		Version:  semver.MustParse("2.5.0"),
	}))
	filteredNodes = filterNodeLessThan260(nodes, nodeManager)
	suite.ElementsMatch(filteredNodes, []int64{1, 2, 3, 4, 5})

	nodeManager.Add(session.NewNodeInfo(session.ImmutableNodeInfo{
		NodeID:   2,
		Address:  "127.0.0.1:0",
		Hostname: "localhost",
		Version:  semver.MustParse("2.6.0-dev"),
	}))
	filteredNodes = filterNodeLessThan260(nodes, nodeManager)
	suite.ElementsMatch(filteredNodes, []int64{1, 3, 4, 5})

	nodeManager.Add(session.NewNodeInfo(session.ImmutableNodeInfo{
		NodeID:   3,
		Address:  "127.0.0.1:0",
		Hostname: "localhost",
		Version:  semver.MustParse("2.6.0"),
	}))
	filteredNodes = filterNodeLessThan260(nodes, nodeManager)
	suite.ElementsMatch(filteredNodes, []int64{1, 4, 5})
}

func TestUtilSuite(t *testing.T) {
	suite.Run(t, new(UtilTestSuite))
}