issue: #46540

An empty timetick is used only to synchronize the time clock between different components in Milvus, so empty timeticks can be ignored once timeticks carry LSN/MVCC semantics. However, some components still rely on empty timeticks to trigger operations such as flush/tsafe, so instead of dropping them entirely we slow empty timeticks down to one emission every 5 seconds.

<!-- This is an auto-generated comment: release notes by coderabbit.ai -->

- Core invariant: with LSN/MVCC semantics, consumers only need (a) the first timetick that advances the latest-required MVCC, to unblock MVCC-dependent waits, and (b) occasional periodic timeticks (~≤5s apart) for clock synchronization. Frequent non-persisted empty timeticks can therefore be suppressed without breaking MVCC correctness.
- Logic removed/simplified: per-message dispatch/consumption of frequent non-persisted empty timeticks is suppressed. An MVCC-aware filter, emptyTimeTickSlowdowner (internal/util/pipeline/consuming_slowdown.go), short-circuits frequent empty timeticks in the stream pipeline (internal/util/pipeline/stream_pipeline.go), and the WAL flusher rate-limits non-persisted timetick dispatch to one emission per ~5s (internal/streamingnode/server/flusher/flusherimpl/wal_flusher.go); the delegator exposes GetLatestRequiredMVCCTimeTick to drive the filter (internal/querynodev2/delegator/delegator.go).
- Why this does NOT introduce data loss or regressions: the slowdowner always refreshes latestRequiredMVCCTimeTick via GetLatestRequiredMVCCTimeTick and (1) never filters timeticks < latestRequiredMVCCTimeTick (so existing tsafe/flush waits stay unblocked) and (2) always lets the first timetick ≥ latestRequiredMVCCTimeTick pass to notify pending MVCC waits. Separately, WAL flusher suppression applies only to non-persisted timeticks and still emits once the 5s threshold elapses, preserving the periodic clock-sync messages used by flush/tsafe.
- Enhancement summary (where it takes effect): adds GetLatestRequiredMVCCTimeTick on ShardDelegator and LastestMVCCTimeTickGetter, wires emptyTimeTickSlowdowner into NewPipelineWithStream (internal/util/pipeline), and adds WAL flusher rate-limiting + metrics (internal/streamingnode/server/flusher/flusherimpl/wal_flusher.go, pkg/metrics) to reduce CPU/dispatch overhead while keeping MVCC correctness and periodic synchronization.

<!-- end of auto-generated comment: release notes by coderabbit.ai -->

---------

Signed-off-by: chyezh <chyezh@outlook.com>
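For intuition, here is a minimal sketch of the slowdown rule described above. All names, fields, and signatures are illustrative assumptions for this note, not the actual code in consuming_slowdown.go:

```go
package pipeline

import "time"

// emptyTimeTickSlowdowner sketches the MVCC-aware filter described above
// (hypothetical shape; the real one lives in internal/util/pipeline).
type emptyTimeTickSlowdowner struct {
	getLatestRequiredMVCC func() uint64 // e.g. backed by GetLatestRequiredMVCCTimeTick
	lastNotifiedMVCC      uint64        // highest required MVCC already passed through
	lastPassed            time.Time     // when an empty timetick last passed
	interval              time.Duration // clock-sync budget, ~5s
}

// shouldDrop reports whether an empty timetick at ts may be suppressed.
func (s *emptyTimeTickSlowdowner) shouldDrop(ts uint64) bool {
	required := s.getLatestRequiredMVCC()
	// (1) Never filter timeticks below the latest required MVCC, so pending
	// tsafe/flush waits keep advancing.
	if ts < required {
		return false
	}
	// (2) Always let the first timetick >= required pass to wake MVCC waits.
	if s.lastNotifiedMVCC < required {
		s.lastNotifiedMVCC = required
		s.lastPassed = time.Now()
		return false
	}
	// (3) Otherwise allow roughly one emission per interval for clock sync.
	if time.Since(s.lastPassed) >= s.interval {
		s.lastPassed = time.Now()
		return false
	}
	return true
}
```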
126 lines · 3.4 KiB · Go
package helper

import (
	"context"
	"flag"
	"testing"
	"time"

	"go.uber.org/zap"
	"google.golang.org/grpc"

	client "github.com/milvus-io/milvus/client/v2/milvusclient"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/tests/go_client/base"
	"github.com/milvus-io/milvus/tests/go_client/common"
)

var (
	addr                = flag.String("addr", "http://localhost:19530", "server host and port")
	user                = flag.String("user", "root", "user")
	password            = flag.String("password", "Milvus", "password")
	logLevel            = flag.String("log.level", "info", "log level for test")
	teiEndpoint         = flag.String("tei_endpoint", "http://text-embeddings-service.milvus-ci.svc.cluster.local:80", "TEI service endpoint for text embedding tests")
	teiRerankerEndpoint = flag.String("tei_reranker_uri", "http://text-rerank-service.milvus-ci.svc.cluster.local:80", "TEI reranker service endpoint")
	teiModelDim         = flag.Int("tei_model_dim", 768, "Vector dimension for text embedding model")

	defaultClientConfig *client.ClientConfig
)
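
// Note (illustrative, not part of the original file): these are standard
// library flag values, so they are passed to the compiled test binary after
// the `-args` separator, e.g.
//
//	go test ./tests/go_client/... -v -args -addr=http://localhost:19530 -log.level=debug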

func setDefaultClientConfig(cfg *client.ClientConfig) {
	defaultClientConfig = cfg
}

// GetDefaultClientConfig returns a copy of the default client config whose
// DialOptions slice is also copied, so callers can modify the returned config
// without mutating the shared default.
func GetDefaultClientConfig() *client.ClientConfig {
	newCfg := *defaultClientConfig
	dialOptions := newCfg.DialOptions
	newDialOptions := make([]grpc.DialOption, len(dialOptions))
	copy(newDialOptions, dialOptions)
	newCfg.DialOptions = newDialOptions
	return &newCfg
}
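
// exampleExtendConfig is a hypothetical caller (not part of the original
// file) showing why the DialOptions slice is copied: the returned config can
// be extended without mutating the package-level default.
func exampleExtendConfig() *client.ClientConfig {
	cfg := GetDefaultClientConfig()
	cfg.DialOptions = append(cfg.DialOptions, grpc.WithUserAgent("go-client-tests"))
	return cfg
}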

func GetAddr() string {
	return *addr
}

func GetUser() string {
	return *user
}

func GetPassword() string {
	return *password
}

func GetTEIEndpoint() string {
	return *teiEndpoint
}

func GetTEIRerankerEndpoint() string {
	return *teiRerankerEndpoint
}

func GetTEIModelDim() int {
	return *teiModelDim
}

func parseLogConfig() {
	log.Info("Parsed log level", zap.String("logLevel", *logLevel))
	switch *logLevel {
	case "debug", "DEBUG", "Debug":
		log.SetLevel(zap.DebugLevel)
	case "info", "INFO", "Info":
		log.SetLevel(zap.InfoLevel)
	case "warn", "WARN", "Warn":
		log.SetLevel(zap.WarnLevel)
	case "error", "ERROR", "Error":
		log.SetLevel(zap.ErrorLevel)
	default:
		log.SetLevel(zap.InfoLevel)
	}
}

func setup() {
	log.Info("Start to set up all......")
	flag.Parse()
	parseLogConfig()
	log.Info("Parsed Milvus address", zap.String("address", *addr))

	// set default milvus client config
	setDefaultClientConfig(&client.ClientConfig{Address: *addr})
}

// teardown connects to Milvus and, as a best-effort cleanup, drops every
// non-default database together with its collections; cleanup errors are
// deliberately ignored.
func teardown() {
	log.Info("Start to tear down all.....")
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*common.DefaultTimeout)
	defer cancel()
	mc, err := base.NewMilvusClient(ctx, &client.ClientConfig{Address: GetAddr(), Username: GetUser(), Password: GetPassword()})
	if err != nil {
		log.Error("teardown failed to connect milvus with error", zap.Error(err))
		// returning early avoids closing a nil client below
		return
	}
	defer mc.Close(ctx)

	// clear dbs
	dbs, _ := mc.ListDatabase(ctx, client.NewListDatabaseOption())
	for _, db := range dbs {
		if db != common.DefaultDb {
			_ = mc.UseDatabase(ctx, client.NewUseDatabaseOption(db))
			collections, _ := mc.ListCollections(ctx, client.NewListCollectionOption())
			for _, coll := range collections {
				_ = mc.DropCollection(ctx, client.NewDropCollectionOption(coll))
			}
			_ = mc.DropDatabase(ctx, client.NewDropDatabaseOption(db))
		}
	}
}

func RunTests(m *testing.M) int {
	setup()
	code := m.Run()
	if code != 0 {
		log.Error("Tests failed and exited", zap.Int("code", code))
	}
	teardown()
	return code
}
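
// Sketch (hypothetical wiring, not part of the original file): a test package
// in this suite would typically hand control to RunTests from the standard
// TestMain hook, e.g.
//
//	func TestMain(m *testing.M) {
//		os.Exit(RunTests(m))
//	}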