milvus/internal/distributed/streaming/msgstream_adaptor_test.go
yihao.dai b18ebd9468
enhance: Remove legacy cdc/replication (#46603)
issue: https://github.com/milvus-io/milvus/issues/44123

<!-- This is an auto-generated comment: release notes by coderabbit.ai -->

- Core invariant: legacy in-cluster CDC/replication plumbing
(ReplicateMsg types, ReplicateID-based guards and flags) is obsolete:
the system relies on standard msgstream positions, subPos/end-ts
semantics, and timetick ordering as the single source of truth for
message ordering and skipping, so replication-specific
channels/types/guards can be removed safely (a minimal sketch of this
skip-by-timetick rule follows these release notes).

- Removed/simplified logic (what and why): removed replication feature
flags and params (ReplicateMsgChannel, TTMsgEnabled,
CollectionReplicateEnable), ReplicateMsg type and its tests, ReplicateID
constants/helpers and MergeProperties hooks, ReplicateConfig and its
propagation (streamPipeline, StreamConfig, dispatcher, target),
replicate-aware dispatcher/pipeline branches, and replicate-mode
pre-checks/timestamp-allocation in proxy tasks — these implemented a
redundant alternate “replicate-mode” pathway that duplicated
position/end-ts and timetick logic.

- Why this does NOT cause data loss or regression (concrete code paths):
no persistence or core write paths were removed — proxy PreExecute flows
(internal/proxy/task_*.go) still perform the same schema/ID/size
validations and then follow the normal non-replicate execution path;
dispatcher and pipeline continue to use position/subPos and
pullback/end-ts in Seek/grouping (pkg/mq/msgdispatcher/dispatcher.go,
internal/util/pipeline/stream_pipeline.go), so skipping and ordering
behavior remains unchanged; timetick emission in rootcoord
(sendMinDdlTsAsTt) is now ungated (no silent suppression), preserving or
increasing timetick delivery rather than removing it.

- PR type and net effect: Enhancement/Refactor — removes deprecated
replication API surface (types, helpers, config, tests) and replication
branches, simplifies public APIs and constructor signatures, and reduces
surface area for future maintenance while keeping DML/DDL persistence,
ordering, and seek semantics intact.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
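
The release notes above reduce to one ordering/skipping rule: a message is delivered or skipped based solely on the seek position's timestamp and each message's timetick, with no replicate-mode guard in the path. Below is a minimal, self-contained sketch of that rule; all types and names are hypothetical stand-ins, not actual Milvus code.

```go
package main

import "fmt"

// msg is a stand-in for a msgstream message; only its timetick matters here.
type msg struct {
	timetick uint64
	body     string
}

// checkpoint is a stand-in for a msgstream position (channel name + timestamp).
type checkpoint struct {
	channel  string
	timetick uint64
}

// replay applies the position/end-ts rule the notes describe: everything at or
// before the checkpoint timetick was already consumed and is skipped; everything
// after it is delivered in timetick order. There is no ReplicateID branch.
func replay(cp checkpoint, stream []msg) []msg {
	var delivered []msg
	for _, m := range stream {
		if m.timetick <= cp.timetick {
			continue // already covered by the checkpoint, skip on replay
		}
		delivered = append(delivered, m)
	}
	return delivered
}

func main() {
	cp := checkpoint{channel: "dml-channel-0", timetick: 100} // hypothetical channel name
	stream := []msg{
		{timetick: 90, body: "insert-a"},  // before the checkpoint: skipped
		{timetick: 100, body: "insert-b"}, // at the checkpoint: skipped
		{timetick: 110, body: "insert-c"}, // after the checkpoint: delivered
	}
	for _, m := range replay(cp, stream) {
		fmt.Printf("deliver tt=%d %s\n", m.timetick, m.body)
	}
}
```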

---------

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
2025-12-30 14:53:21 +08:00


package streaming

import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/pkg/v2/mq/common"
	"github.com/milvus-io/milvus/pkg/v2/mq/msgstream"
)
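
// TestDelegatorMsgstreamFactory checks the delegator-side factory: per the
// subtests below, only NewTtMsgStream is expected to succeed, while
// NewMsgStream and NewMsgStreamDisposer are expected to panic.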
func TestDelegatorMsgstreamFactory(t *testing.T) {
	factory := NewDelegatorMsgstreamFactory()

	// Test NewMsgStream
	t.Run("NewMsgStream", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("NewMsgStream should panic but did not")
			}
		}()
		_, _ = factory.NewMsgStream(context.Background())
	})

	// Test NewTtMsgStream
	t.Run("NewTtMsgStream", func(t *testing.T) {
		stream, err := factory.NewTtMsgStream(context.Background())
		if err != nil {
			t.Errorf("NewTtMsgStream returned an error: %v", err)
		}
		if stream == nil {
			t.Errorf("NewTtMsgStream returned nil stream")
		}
	})

	// Test NewMsgStreamDisposer
	t.Run("NewMsgStreamDisposer", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("NewMsgStreamDisposer should panic but did not")
			}
		}()
		_ = factory.NewMsgStreamDisposer(context.Background())
	})
}
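
// TestDelegatorMsgstreamAdaptor checks which parts of the msgstream surface the
// delegator adaptor supports: Close, AsConsumer, and GetUnmarshalDispatcher are
// expected to work, Seek is expected to panic when it is not given exactly one
// position, and the remaining (producer-side and unused) methods are expected
// to panic.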
func TestDelegatorMsgstreamAdaptor(t *testing.T) {
	adaptor := &delegatorMsgstreamAdaptor{}

	// Test Close
	t.Run("Close", func(t *testing.T) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("Close should not panic but did")
			}
		}()
		adaptor.Close()
	})

	// Test AsProducer
	t.Run("AsProducer", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("AsProducer should panic but did not")
			}
		}()
		adaptor.AsProducer(context.Background(), []string{"channel1"})
	})

	// Test Produce
	t.Run("Produce", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("Produce should panic but did not")
			}
		}()
		_ = adaptor.Produce(context.Background(), &msgstream.MsgPack{})
	})

	// Test SetRepackFunc
	t.Run("SetRepackFunc", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("SetRepackFunc should panic but did not")
			}
		}()
		adaptor.SetRepackFunc(nil)
	})

	// Test GetProduceChannels
	t.Run("GetProduceChannels", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("GetProduceChannels should panic but did not")
			}
		}()
		_ = adaptor.GetProduceChannels()
	})

	// Test Broadcast
	t.Run("Broadcast", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("Broadcast should panic but did not")
			}
		}()
		_, _ = adaptor.Broadcast(context.Background(), &msgstream.MsgPack{})
	})

	// Test AsConsumer
	t.Run("AsConsumer", func(t *testing.T) {
		err := adaptor.AsConsumer(context.Background(), []string{"channel1"}, "subName", common.SubscriptionPositionUnknown)
		if err != nil {
			t.Errorf("AsConsumer returned an error: %v", err)
		}
	})

	// Test Chan
	t.Run("Chan", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("Chan should panic but did not")
			}
		}()
		adaptor.Chan()
	})

	// Test GetUnmarshalDispatcher
	t.Run("GetUnmarshalDispatcher", func(t *testing.T) {
		dispatcher := adaptor.GetUnmarshalDispatcher()
		if dispatcher == nil {
			t.Errorf("GetUnmarshalDispatcher returned nil")
		}
	})

	// Test Seek
	t.Run("Seek", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("Seek should panic if len(msgPositions) != 1 but did not")
			}
		}()
		_ = adaptor.Seek(context.Background(), []*msgstream.MsgPosition{}, true)
	})

	// Test GetLatestMsgID
	t.Run("GetLatestMsgID", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("GetLatestMsgID should panic but did not")
			}
		}()
		_, _ = adaptor.GetLatestMsgID("channel1")
	})

	// Test CheckTopicValid
	t.Run("CheckTopicValid", func(t *testing.T) {
		defer func() {
			if r := recover(); r == nil {
				t.Errorf("CheckTopicValid should panic but did not")
			}
		}()
		_ = adaptor.CheckTopicValid("channel1")
	})
}
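
The tests above pin down the shape of the delegator adaptor: Close, AsConsumer, and GetUnmarshalDispatcher work, Seek is expected to reject anything but exactly one position, and every method the delegator does not use panics. Below is a minimal, hypothetical sketch of that "partial adaptor" pattern, using stand-in types rather than the real msgstream interface.

```go
package main

import "fmt"

// Stream is a stand-in for a small slice of a message-stream interface.
type Stream interface {
	AsConsumer(channels []string, subName string) error
	Seek(positions []uint64) error
	Produce(payload string) error
	Close()
}

// partialAdaptor mirrors the pattern the tests exercise: supported methods are
// implemented, unsupported ones panic so accidental misuse fails loudly.
type partialAdaptor struct{}

func (a *partialAdaptor) AsConsumer(channels []string, subName string) error {
	return nil // subscription is accepted; real wiring is omitted in this sketch
}

func (a *partialAdaptor) Seek(positions []uint64) error {
	if len(positions) != 1 {
		// Mirrors the "Seek should panic if len(msgPositions) != 1" expectation above.
		panic(fmt.Sprintf("seek expects exactly one position, got %d", len(positions)))
	}
	return nil
}

// Produce is intentionally unimplemented: this adaptor is consume-only.
func (a *partialAdaptor) Produce(payload string) error {
	panic("producing is not supported on this adaptor")
}

func (a *partialAdaptor) Close() {} // safe no-op, as the Close subtest expects

func main() {
	var s Stream = &partialAdaptor{}
	_ = s.AsConsumer([]string{"channel1"}, "subName")
	_ = s.Seek([]uint64{100})
	s.Close()
}
```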