Make Params singleton (#20088)

Signed-off-by: Enwei Jiao <enwei.jiao@zilliz.com>

Enwei Jiao 2022-11-04 14:25:38 +08:00 committed by GitHub
parent 15b2d1cace
commit 956c5e1b9d
124 changed files with 1126 additions and 1132 deletions
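
Note on the pattern this commit applies throughout: the per-package "var Params paramtable.ComponentParam" copies (in datacoord, datanode, and other packages) give way to a single shared instance, reached through paramtable.Init() once at startup, paramtable.Get() for the shared *ComponentParam, and paramtable.GetNodeID() / paramtable.SetNodeID() in place of calls such as Params.DataCoordCfg.GetNodeID(). The Go sketch below condenses that access pattern from the hunks that follow; it is illustrative only and not part of the commit.

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/util/paramtable"
)

func main() {
	// Initialize the process-wide parameter table exactly once per process.
	paramtable.Init()

	// Every component shares this *ComponentParam instead of holding
	// its own package-level copy.
	params := paramtable.Get()

	// Node identity is likewise read and written through package-level helpers.
	paramtable.SetNodeID(1)
	fmt.Println(params.EtcdCfg.MetaRootPath, paramtable.GetNodeID())
}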


@ -30,17 +30,13 @@ import (
"go.uber.org/zap"
"github.com/milvus-io/milvus/cmd/components"
"github.com/milvus-io/milvus/internal/datacoord"
"github.com/milvus-io/milvus/internal/datanode"
"github.com/milvus-io/milvus/internal/indexcoord"
"github.com/milvus-io/milvus/internal/indexnode"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/management/healthz"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proxy"
querycoord "github.com/milvus-io/milvus/internal/querycoordv2"
"github.com/milvus-io/milvus/internal/querynode"
"github.com/milvus-io/milvus/internal/rootcoord"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
@ -50,8 +46,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var Params paramtable.ComponentParam
// all milvus related metrics is in a separate registry
var Registry *prometheus.Registry
@ -74,7 +68,6 @@ type component interface {
func runComponent[T component](ctx context.Context,
localMsg bool,
params *paramtable.ComponentParam,
extraInit func(),
creator func(context.Context, dependency.Factory) (T, error),
metricRegister func(*prometheus.Registry)) T {
@ -83,7 +76,7 @@ func runComponent[T component](ctx context.Context,
wg.Add(1)
go func() {
params.InitOnce()
params := paramtable.Get()
if extraInit != nil {
extraInit()
}
@ -136,11 +129,11 @@ func (mr *MilvusRoles) printLDPreLoad() {
}
func (mr *MilvusRoles) runRootCoord(ctx context.Context, localMsg bool) *components.RootCoord {
return runComponent(ctx, localMsg, &rootcoord.Params, nil, components.NewRootCoord, metrics.RegisterRootCoord)
return runComponent(ctx, localMsg, nil, components.NewRootCoord, metrics.RegisterRootCoord)
}
func (mr *MilvusRoles) runProxy(ctx context.Context, localMsg bool, alias string) *components.Proxy {
return runComponent(ctx, localMsg, &proxy.Params,
return runComponent(ctx, localMsg,
func() {
proxy.Params.ProxyCfg.InitAlias(alias)
},
@ -149,11 +142,11 @@ func (mr *MilvusRoles) runProxy(ctx context.Context, localMsg bool, alias string
}
func (mr *MilvusRoles) runQueryCoord(ctx context.Context, localMsg bool) *components.QueryCoord {
return runComponent(ctx, localMsg, querycoord.Params, nil, components.NewQueryCoord, metrics.RegisterQueryCoord)
return runComponent(ctx, localMsg, nil, components.NewQueryCoord, metrics.RegisterQueryCoord)
}
func (mr *MilvusRoles) runQueryNode(ctx context.Context, localMsg bool, alias string) *components.QueryNode {
return runComponent(ctx, localMsg, &querynode.Params,
return runComponent(ctx, localMsg,
func() {
querynode.Params.QueryNodeCfg.InitAlias(alias)
},
@ -162,11 +155,11 @@ func (mr *MilvusRoles) runQueryNode(ctx context.Context, localMsg bool, alias st
}
func (mr *MilvusRoles) runDataCoord(ctx context.Context, localMsg bool) *components.DataCoord {
return runComponent(ctx, localMsg, &datacoord.Params, nil, components.NewDataCoord, metrics.RegisterDataCoord)
return runComponent(ctx, localMsg, nil, components.NewDataCoord, metrics.RegisterDataCoord)
}
func (mr *MilvusRoles) runDataNode(ctx context.Context, localMsg bool, alias string) *components.DataNode {
return runComponent(ctx, localMsg, &datanode.Params,
return runComponent(ctx, localMsg,
func() {
datanode.Params.DataNodeCfg.InitAlias(alias)
},
@ -175,11 +168,11 @@ func (mr *MilvusRoles) runDataNode(ctx context.Context, localMsg bool, alias str
}
func (mr *MilvusRoles) runIndexCoord(ctx context.Context, localMsg bool) *components.IndexCoord {
return runComponent(ctx, localMsg, &indexcoord.Params, nil, components.NewIndexCoord, metrics.RegisterIndexCoord)
return runComponent(ctx, localMsg, nil, components.NewIndexCoord, metrics.RegisterIndexCoord)
}
func (mr *MilvusRoles) runIndexNode(ctx context.Context, localMsg bool, alias string) *components.IndexNode {
return runComponent(ctx, localMsg, &indexnode.Params,
return runComponent(ctx, localMsg,
func() {
indexnode.Params.IndexNodeCfg.InitAlias(alias)
},
@ -198,10 +191,11 @@ func (mr *MilvusRoles) Run(local bool, alias string) {
if err := os.Setenv(metricsinfo.DeployModeEnvKey, metricsinfo.StandaloneDeployMode); err != nil {
log.Error("Failed to set deploy mode: ", zap.Error(err))
}
Params.Init()
paramtable.Init()
params := paramtable.Get()
if Params.RocksmqEnable() {
path, err := Params.Load("rocksmq.path")
if params.RocksmqEnable() {
path, err := params.Load("rocksmq.path")
if err != nil {
panic(err)
}
@ -212,15 +206,16 @@ func (mr *MilvusRoles) Run(local bool, alias string) {
defer stopRocksmq()
}
if Params.EtcdCfg.UseEmbedEtcd {
if params.EtcdCfg.UseEmbedEtcd {
// Start etcd server.
etcd.InitEtcdServer(&Params.EtcdCfg)
etcd.InitEtcdServer(&params.EtcdCfg)
defer etcd.StopEtcdServer()
}
} else {
if err := os.Setenv(metricsinfo.DeployModeEnvKey, metricsinfo.ClusterDeployMode); err != nil {
log.Error("Failed to set deploy mode: ", zap.Error(err))
}
paramtable.Init()
}
if os.Getenv(metricsinfo.DeployModeEnvKey) == metricsinfo.StandaloneDeployMode {


@ -23,6 +23,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
)
// allocator is the interface that allocating `UniqueID` or `Timestamp`
@ -54,7 +55,7 @@ func (alloc *rootCoordAllocator) allocTimestamp(ctx context.Context) (Timestamp,
commonpbutil.WithMsgType(commonpb.MsgType_RequestTSO),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
Count: 1,
})
@ -71,7 +72,7 @@ func (alloc *rootCoordAllocator) allocID(ctx context.Context) (UniqueID, error)
commonpbutil.WithMsgType(commonpb.MsgType_RequestID),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
Count: 1,
})


@ -20,10 +20,12 @@ import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/stretchr/testify/assert"
)
func TestAllocator_Basic(t *testing.T) {
paramtable.Init()
ms := newMockRootCoordService()
allocator := newRootCoordAllocator(ms)
ctx := context.Background()


@ -25,6 +25,7 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/samber/lo"
"go.uber.org/zap"
)
@ -103,7 +104,7 @@ func (c *Cluster) Flush(ctx context.Context, nodeID int64, channel string,
req := &datapb.FlushSegmentsRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_Flush),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
commonpbutil.WithTargetID(nodeID),
),
CollectionID: ch.CollectionID,


@ -33,7 +33,6 @@ import (
)
func getMetaKv(t *testing.T) kv.MetaKv {
Params.Init()
rootPath := "/etcd/test/root/" + t.Name()
metakv, err := etcdkv.NewMetaKvFactory(rootPath, &Params.EtcdCfg)
require.NoError(t, err)


@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/hardware"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/util/uniquegenerator"
)
@ -89,7 +90,7 @@ func (s *Server) getSystemInfoMetrics(
coordTopology := metricsinfo.DataCoordTopology{
Cluster: clusterTopology,
Connections: metricsinfo.ConnTopology{
Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
// TODO(dragondriver): fill ConnectedComponents if necessary
ConnectedComponents: []metricsinfo.ConnectionInfo{},
},
@ -100,7 +101,7 @@ func (s *Server) getSystemInfoMetrics(
ErrorCode: commonpb.ErrorCode_UnexpectedError,
},
Response: "",
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
}
var err error
resp.Response, err = metricsinfo.MarshalTopology(coordTopology)
@ -117,7 +118,7 @@ func (s *Server) getSystemInfoMetrics(
func (s *Server) getDataCoordMetrics() metricsinfo.DataCoordInfos {
ret := metricsinfo.DataCoordInfos{
BaseComponentInfos: metricsinfo.BaseComponentInfos{
Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
HardwareInfos: metricsinfo.HardwareMetrics{
IP: s.session.Address,
CPUCoreCount: hardware.GetCPUNum(),


@ -83,7 +83,7 @@ type rootCoordCreatorFunc func(ctx context.Context, metaRootPath string, etcdCli
// makes sure Server implements `DataCoord`
var _ types.DataCoord = (*Server)(nil)
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Server implements `types.DataCoord`
// handles Data Coordinator related jobs
@ -97,6 +97,7 @@ type Server struct {
helper ServerHelper
etcdCli *clientv3.Client
address string
kvClient *etcdkv.EtcdKV
meta *meta
segmentManager Manager
@ -241,10 +242,10 @@ func (s *Server) initSession() error {
if s.session == nil {
return errors.New("failed to initialize session")
}
s.session.Init(typeutil.DataCoordRole, Params.DataCoordCfg.Address, true, true)
s.session.Init(typeutil.DataCoordRole, s.address, true, true)
s.session.SetEnableActiveStandBy(s.enableActiveStandBy)
Params.DataCoordCfg.SetNodeID(s.session.ServerID)
Params.SetLogger(Params.DataCoordCfg.GetNodeID())
paramtable.SetNodeID(s.session.ServerID)
Params.SetLogger(paramtable.GetNodeID())
return nil
}
@ -252,7 +253,7 @@ func (s *Server) initSession() error {
func (s *Server) Init() error {
var err error
s.stateCode.Store(commonpb.StateCode_Initializing)
s.factory.Init(&Params)
s.factory.Init(Params)
if err = s.initRootCoordClient(); err != nil {
return err
@ -349,6 +350,10 @@ func (s *Server) initCluster() error {
return nil
}
func (s *Server) SetAddress(address string) {
s.address = address
}
// SetEtcdClient sets etcd client for datacoord.
func (s *Server) SetEtcdClient(client *clientv3.Client) {
s.etcdCli = client
@ -375,7 +380,7 @@ func (s *Server) stopCompactionTrigger() {
}
func (s *Server) newChunkManagerFactory() (storage.ChunkManager, error) {
chunkManagerFactory := storage.NewChunkManagerFactoryWithParam(&Params)
chunkManagerFactory := storage.NewChunkManagerFactoryWithParam(Params)
cli, err := chunkManagerFactory.NewPersistentStorageChunkManager(s.ctx)
if err != nil {
log.Error("chunk manager init failed", zap.Error(err))
@ -893,7 +898,7 @@ func (s *Server) loadCollectionFromRootCoord(ctx context.Context, collectionID i
resp, err := s.rootCoordClient.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
DbName: "",
CollectionID: collectionID,
@ -906,7 +911,7 @@ func (s *Server) loadCollectionFromRootCoord(ctx context.Context, collectionID i
commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
DbName: "",
CollectionName: resp.Schema.Name,


@ -32,6 +32,7 @@ import (
"github.com/milvus-io/milvus/internal/mocks"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
@ -60,6 +61,7 @@ import (
)
func TestMain(m *testing.M) {
paramtable.Init()
rand.Seed(time.Now().UnixNano())
os.Exit(m.Run())
}
@ -2413,7 +2415,7 @@ func TestGetCompactionState(t *testing.T) {
resp, err := svr.GetCompactionState(context.Background(), &milvuspb.GetCompactionStateRequest{})
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.GetStatus().GetReason())
assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.GetStatus().GetReason())
})
}
@ -2474,7 +2476,7 @@ func TestManualCompaction(t *testing.T) {
})
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.Status.Reason)
assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.Status.Reason)
})
}
@ -2525,7 +2527,7 @@ func TestGetCompactionStateWithPlans(t *testing.T) {
})
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.Status.Reason)
assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.Status.Reason)
})
}
@ -2959,7 +2961,7 @@ func TestDataCoord_Import(t *testing.T) {
})
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.GetErrorCode())
assert.Equal(t, msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()), resp.Status.GetReason())
assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.Status.GetReason())
})
t.Run("test update segment stat", func(t *testing.T) {
@ -3041,7 +3043,7 @@ func TestDataCoord_SaveImportSegment(t *testing.T) {
RowNum: int64(1),
SaveBinlogPathReq: &datapb.SaveBinlogPathsRequest{
Base: &commonpb.MsgBase{
SourceID: Params.DataNodeCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
SegmentID: 100,
CollectionID: 100,


@ -25,6 +25,7 @@ import (
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/errorutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"golang.org/x/sync/errgroup"
@ -606,7 +607,7 @@ func (s *Server) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
dresp, err := s.rootCoordClient.DescribeCollection(s.ctx, &milvuspb.DescribeCollectionRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionID: collectionID,
})
@ -801,14 +802,14 @@ func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
log.Debug("DataCoord.ShowConfigurations", zap.String("pattern", req.Pattern))
if s.isClosed() {
log.Warn("DataCoord.ShowConfigurations failed",
zap.Int64("nodeId", Params.DataCoordCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Pattern),
zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
zap.Error(errDataCoordIsUnhealthy(paramtable.GetNodeID())))
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()),
Reason: msgDataCoordIsUnhealthy(paramtable.GetNodeID()),
},
Configuations: nil,
}, nil
@ -822,15 +823,15 @@ func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
if s.isClosed() {
log.Warn("DataCoord.GetMetrics failed",
zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
zap.Error(errDataCoordIsUnhealthy(paramtable.GetNodeID())))
return &milvuspb.GetMetricsResponse{
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID()),
Reason: msgDataCoordIsUnhealthy(paramtable.GetNodeID()),
},
Response: "",
}, nil
@ -839,12 +840,12 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
metricType, err := metricsinfo.ParseMetricType(req.Request)
if err != nil {
log.Warn("DataCoord.GetMetrics failed to parse metric type",
zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(err))
return &milvuspb.GetMetricsResponse{
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -856,7 +857,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
if metricType == metricsinfo.SystemInfoMetrics {
metrics, err := s.getSystemInfoMetrics(ctx, req)
if err != nil {
log.Warn("DataCoord GetMetrics failed", zap.Int64("nodeID", Params.DataCoordCfg.GetNodeID()), zap.Error(err))
log.Warn("DataCoord GetMetrics failed", zap.Int64("nodeID", paramtable.GetNodeID()), zap.Error(err))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
@ -866,7 +867,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
}
log.Debug("DataCoord.GetMetrics",
zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType),
zap.Any("metrics", metrics), // TODO(dragondriver): necessary? may be very large
@ -876,12 +877,12 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
}
log.RatedWarn(60.0, "DataCoord.GetMetrics failed, request metric type is not implemented yet",
zap.Int64("node_id", Params.DataCoordCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType))
return &milvuspb.GetMetricsResponse{
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, Params.DataCoordCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataCoordRole, paramtable.GetNodeID()),
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: metricsinfo.MsgUnimplementedMetric,
@ -902,8 +903,8 @@ func (s *Server) ManualCompaction(ctx context.Context, req *milvuspb.ManualCompa
if s.isClosed() {
log.Warn("failed to execute manual compaction", zap.Int64("collectionID", req.GetCollectionID()),
zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
zap.Error(errDataCoordIsUnhealthy(paramtable.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -936,8 +937,8 @@ func (s *Server) GetCompactionState(ctx context.Context, req *milvuspb.GetCompac
if s.isClosed() {
log.Warn("failed to get compaction state", zap.Int64("compactionID", req.GetCompactionID()),
zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
zap.Error(errDataCoordIsUnhealthy(paramtable.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -975,8 +976,8 @@ func (s *Server) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.
}
if s.isClosed() {
log.Warn("failed to get compaction state with plans", zap.Int64("compactionID", req.GetCompactionID()), zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
log.Warn("failed to get compaction state with plans", zap.Int64("compactionID", req.GetCompactionID()), zap.Error(errDataCoordIsUnhealthy(paramtable.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -1054,8 +1055,8 @@ func (s *Server) WatchChannels(ctx context.Context, req *datapb.WatchChannelsReq
if s.isClosed() {
log.Warn("failed to watch channels request", zap.Any("channels", req.GetChannelNames()),
zap.Error(errDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
zap.Error(errDataCoordIsUnhealthy(paramtable.GetNodeID())))
resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
for _, channelName := range req.GetChannelNames() {
@ -1083,7 +1084,7 @@ func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateR
if s.isClosed() {
log.Warn("DataCoord receive GetFlushState request, server closed",
zap.Int64s("segmentIDs", req.GetSegmentIDs()), zap.Int("len", len(req.GetSegmentIDs())))
resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -1121,7 +1122,7 @@ func (s *Server) Import(ctx context.Context, itr *datapb.ImportTaskRequest) (*da
if s.isClosed() {
log.Error("failed to import for closed DataCoord service")
resp.Status.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
resp.Status.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -1160,7 +1161,7 @@ func (s *Server) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
}
if s.isClosed() {
log.Warn("failed to update segment stat for closed server")
resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
resp.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
s.updateSegmentStatistics(req.GetStats())
@ -1192,7 +1193,7 @@ func (s *Server) AcquireSegmentLock(ctx context.Context, req *datapb.AcquireSegm
if s.isClosed() {
log.Warn("failed to acquire segments reference lock for closed server")
resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
resp.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -1232,7 +1233,7 @@ func (s *Server) ReleaseSegmentLock(ctx context.Context, req *datapb.ReleaseSegm
if s.isClosed() {
log.Warn("failed to release segments reference lock for closed server")
resp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
resp.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return resp, nil
}
@ -1262,7 +1263,7 @@ func (s *Server) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSe
if s.isClosed() {
log.Warn("failed to add segment for closed server")
errResp.ErrorCode = commonpb.ErrorCode_DataCoordNA
errResp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
errResp.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return errResp, nil
}
// Look for the DataNode that watches the channel.
@ -1286,7 +1287,7 @@ func (s *Server) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSe
&datapb.AddImportSegmentRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithTimeStamp(req.GetBase().GetTimestamp()),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
SegmentId: req.GetSegmentId(),
ChannelName: req.GetChannelName(),
@ -1373,7 +1374,7 @@ func (s *Server) BroadcastAlteredCollection(ctx context.Context,
if s.isClosed() {
log.Warn("failed to broadcast collection information for closed server")
errResp.Reason = msgDataCoordIsUnhealthy(Params.DataCoordCfg.GetNodeID())
errResp.Reason = msgDataCoordIsUnhealthy(paramtable.GetNodeID())
return errResp, nil
}


@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"go.uber.org/zap"
)
@ -221,7 +222,7 @@ func (c *SessionManager) execReCollectSegmentStats(ctx context.Context, nodeID i
resp, err := cli.ResendSegmentStats(ctx, &datapb.ResendSegmentStatsRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_ResendSegmentStats),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
})
if err := VerifyResponse(resp, err); err != nil {
@ -254,7 +255,7 @@ func (c *SessionManager) GetCompactionState() map[int64]*datapb.CompactionStateR
resp, err := cli.GetCompactionState(ctx, &datapb.CompactionStateRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_GetSystemConfigs),
commonpbutil.WithSourceID(Params.DataCoordCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
})
if err != nil {


@ -22,6 +22,7 @@ import (
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/metautil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
@ -55,7 +56,7 @@ func (alloc *allocator) allocID() (UniqueID, error) {
commonpbutil.WithMsgType(commonpb.MsgType_RequestID),
commonpbutil.WithMsgID(1), // GOOSE TODO
commonpbutil.WithTimeStamp(0), // GOOSE TODO
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
Count: 1,
})
@ -76,7 +77,7 @@ func (alloc *allocator) allocIDBatch(count uint32) (UniqueID, uint32, error) {
resp, err := alloc.rootCoord.AllocID(ctx, &rootcoordpb.AllocIDRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_RequestID),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
Count: count,
})


@ -30,6 +30,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)
@ -114,7 +115,7 @@ func (c *ChannelMeta) segmentFlushed(segID UniqueID) {
if seg, ok := c.segments[segID]; ok {
seg.setType(datapb.SegmentType_Flushed)
}
metrics.DataNodeNumUnflushedSegments.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Dec()
metrics.DataNodeNumUnflushedSegments.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Dec()
}
// new2NormalSegment transfers a segment from *New* to *Normal*.
@ -199,7 +200,7 @@ func (c *ChannelMeta) addSegment(req addSegmentReq) error {
c.segments[req.segID] = seg
c.segMu.Unlock()
if req.segType == datapb.SegmentType_New || req.segType == datapb.SegmentType_Normal {
metrics.DataNodeNumUnflushedSegments.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
metrics.DataNodeNumUnflushedSegments.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
}
return nil
}
@ -397,7 +398,7 @@ func (c *ChannelMeta) removeSegments(segIDs ...UniqueID) {
delete(c.segments, segID)
}
metrics.DataNodeNumUnflushedSegments.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Sub(float64(cnt))
metrics.DataNodeNumUnflushedSegments.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Sub(float64(cnt))
}
// hasSegment checks whether this channel has a segment according to segment ID.


@ -32,6 +32,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -588,7 +589,7 @@ func (t *compactionTask) compact() (*datapb.CompactionResult, error) {
)
log.Info("overall elapse in ms", zap.Int64("planID", t.plan.GetPlanID()), zap.Float64("elapse", nano2Milli(time.Since(compactStart))))
metrics.DataNodeCompactionLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Observe(float64(t.tr.ElapseSpan().Milliseconds()))
metrics.DataNodeCompactionLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Observe(float64(t.tr.ElapseSpan().Milliseconds()))
return pack, nil
}


@ -90,7 +90,7 @@ var getFlowGraphServiceAttempts = uint(50)
var _ types.DataNode = (*DataNode)(nil)
// Params from config.yaml
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// rateCol is global rateCollector in DataNode.
var rateCol *rateCollector
@ -122,6 +122,7 @@ type DataNode struct {
compactionExecutor *compactionExecutor
etcdCli *clientv3.Client
address string
rootCoord types.RootCoord
dataCoord types.DataCoord
@ -157,6 +158,10 @@ func NewDataNode(ctx context.Context, factory dependency.Factory) *DataNode {
return node
}
func (node *DataNode) SetAddress(address string) {
node.address = address
}
// SetEtcdClient sets etcd client for DataNode
func (node *DataNode) SetEtcdClient(etcdCli *clientv3.Client) {
node.etcdCli = etcdCli
@ -210,9 +215,9 @@ func (node *DataNode) initSession() error {
if node.session == nil {
return errors.New("failed to initialize session")
}
node.session.Init(typeutil.DataNodeRole, Params.DataNodeCfg.IP+":"+strconv.Itoa(Params.DataNodeCfg.Port), false, true)
Params.DataNodeCfg.SetNodeID(node.session.ServerID)
Params.SetLogger(Params.DataNodeCfg.GetNodeID())
node.session.Init(typeutil.DataNodeRole, node.address, false, true)
paramtable.SetNodeID(node.session.ServerID)
Params.SetLogger(paramtable.GetNodeID())
return nil
}
@ -240,21 +245,21 @@ func (node *DataNode) Init() error {
err := node.initRateCollector()
if err != nil {
log.Error("DataNode server init rateCollector failed", zap.Int64("node ID", Params.QueryNodeCfg.GetNodeID()), zap.Error(err))
log.Error("DataNode server init rateCollector failed", zap.Int64("node ID", paramtable.GetNodeID()), zap.Error(err))
return err
}
log.Info("DataNode server init rateCollector done", zap.Int64("node ID", Params.QueryNodeCfg.GetNodeID()))
log.Info("DataNode server init rateCollector done", zap.Int64("node ID", paramtable.GetNodeID()))
idAllocator, err := allocator2.NewIDAllocator(node.ctx, node.rootCoord, Params.DataNodeCfg.GetNodeID())
idAllocator, err := allocator2.NewIDAllocator(node.ctx, node.rootCoord, paramtable.GetNodeID())
if err != nil {
log.Error("failed to create id allocator",
zap.Error(err),
zap.String("role", typeutil.DataNodeRole), zap.Int64("DataNode ID", Params.DataNodeCfg.GetNodeID()))
zap.String("role", typeutil.DataNodeRole), zap.Int64("DataNode ID", paramtable.GetNodeID()))
return err
}
node.rowIDAllocator = idAllocator
node.factory.Init(&Params)
node.factory.Init(Params)
log.Info("DataNode server init succeeded",
zap.String("MsgChannelSubName", Params.CommonCfg.DataNodeSubName))
@ -266,7 +271,7 @@ func (node *DataNode) StartWatchChannels(ctx context.Context) {
defer logutil.LogPanic()
// REF MEP#7 watch path should be [prefix]/channel/{node_id}/{channel_name}
// TODO, this is risky, we'd better watch etcd with revision rather simply a path
watchPrefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", Params.DataNodeCfg.GetNodeID()))
watchPrefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", paramtable.GetNodeID()))
evtChan := node.watchKv.WatchWithPrefix(watchPrefix)
// after watch, first check all exists nodes first
err := node.checkWatchedList()
@ -308,7 +313,7 @@ func (node *DataNode) StartWatchChannels(ctx context.Context) {
// serves the corner case for etcd connection lost and missing some events
func (node *DataNode) checkWatchedList() error {
// REF MEP#7 watch path should be [prefix]/channel/{node_id}/{channel_name}
prefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", Params.DataNodeCfg.GetNodeID()))
prefix := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", paramtable.GetNodeID()))
keys, values, err := node.watchKv.LoadWithPrefix(prefix)
if err != nil {
return err
@ -418,7 +423,7 @@ func (node *DataNode) handlePutEvent(watchInfo *datapb.ChannelWatchInfo, version
return fmt.Errorf("fail to marshal watchInfo with state, vChanName: %s, state: %s ,err: %w", vChanName, watchInfo.State.String(), err)
}
key := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", Params.DataNodeCfg.GetNodeID()), vChanName)
key := path.Join(Params.DataNodeCfg.ChannelWatchSubPath, fmt.Sprintf("%d", paramtable.GetNodeID()), vChanName)
success, err := node.watchKv.CompareVersionAndSwap(key, version, string(v))
// etcd error, retrying
@ -481,7 +486,7 @@ func (node *DataNode) Start() error {
commonpbutil.WithMsgType(commonpb.MsgType_RequestTSO),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
Count: 1,
})
@ -582,7 +587,7 @@ func (node *DataNode) ReadyToFlush() error {
// One precondition: The segmentID in req is in ascending order.
func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmentsRequest) (*commonpb.Status, error) {
metrics.DataNodeFlushReqCounter.WithLabelValues(
fmt.Sprint(Params.DataNodeCfg.GetNodeID()),
fmt.Sprint(paramtable.GetNodeID()),
MetricRequestsTotal).Inc()
errStatus := &commonpb.Status{
@ -671,7 +676,7 @@ func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmen
}
metrics.DataNodeFlushReqCounter.WithLabelValues(
fmt.Sprint(Params.DataNodeCfg.GetNodeID()),
fmt.Sprint(paramtable.GetNodeID()),
MetricRequestsSuccess).Inc()
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
@ -682,7 +687,7 @@ func (node *DataNode) FlushSegments(ctx context.Context, req *datapb.FlushSegmen
// It returns a list of segments to be sent.
func (node *DataNode) ResendSegmentStats(ctx context.Context, req *datapb.ResendSegmentStatsRequest) (*datapb.ResendSegmentStatsResponse, error) {
log.Info("start resending segment stats, if any",
zap.Int64("DataNode ID", Params.DataNodeCfg.GetNodeID()))
zap.Int64("DataNode ID", paramtable.GetNodeID()))
segResent := node.flowgraphManager.resendTT()
log.Info("found segment(s) with stats to resend",
zap.Int64s("segment IDs", segResent))
@ -742,14 +747,14 @@ func (node *DataNode) ShowConfigurations(ctx context.Context, req *internalpb.Sh
log.Debug("DataNode.ShowConfigurations", zap.String("pattern", req.Pattern))
if !node.isHealthy() {
log.Warn("DataNode.ShowConfigurations failed",
zap.Int64("nodeId", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Pattern),
zap.Error(errDataNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())))
zap.Error(errDataNodeIsUnhealthy(paramtable.GetNodeID())))
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgDataNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
Reason: msgDataNodeIsUnhealthy(paramtable.GetNodeID()),
},
Configuations: nil,
}, nil
@ -762,14 +767,14 @@ func (node *DataNode) ShowConfigurations(ctx context.Context, req *internalpb.Sh
func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
if !node.isHealthy() {
log.Warn("DataNode.GetMetrics failed",
zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(errDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID())))
zap.Error(errDataNodeIsUnhealthy(paramtable.GetNodeID())))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID()),
Reason: msgDataNodeIsUnhealthy(paramtable.GetNodeID()),
},
}, nil
}
@ -777,14 +782,14 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
metricType, err := metricsinfo.ParseMetricType(req.Request)
if err != nil {
log.Warn("DataNode.GetMetrics failed to parse metric type",
zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(err))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: fmt.Sprintf("datanode GetMetrics failed, nodeID=%d, err=%s", Params.DataNodeCfg.GetNodeID(), err.Error()),
Reason: fmt.Sprintf("datanode GetMetrics failed, nodeID=%d, err=%s", paramtable.GetNodeID(), err.Error()),
},
}, nil
}
@ -792,17 +797,17 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
if metricType == metricsinfo.SystemInfoMetrics {
systemInfoMetrics, err := node.getSystemInfoMetrics(ctx, req)
if err != nil {
log.Warn("DataNode GetMetrics failed", zap.Int64("nodeID", Params.DataNodeCfg.GetNodeID()), zap.Error(err))
log.Warn("DataNode GetMetrics failed", zap.Int64("nodeID", paramtable.GetNodeID()), zap.Error(err))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: fmt.Sprintf("datanode GetMetrics failed, nodeID=%d, err=%s", Params.DataNodeCfg.GetNodeID(), err.Error()),
Reason: fmt.Sprintf("datanode GetMetrics failed, nodeID=%d, err=%s", paramtable.GetNodeID(), err.Error()),
},
}, nil
}
log.Debug("DataNode.GetMetrics",
zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType),
zap.Any("systemInfoMetrics", systemInfoMetrics), // TODO(dragondriver): necessary? may be very large
@ -812,7 +817,7 @@ func (node *DataNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
}
log.Debug("DataNode.GetMetrics failed, request metric type is not implemented yet",
zap.Int64("node_id", Params.DataNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType))
@ -986,7 +991,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
ErrorCode: commonpb.ErrorCode_Success,
},
TaskId: req.GetImportTask().TaskId,
DatanodeId: Params.DataNodeCfg.GetNodeID(),
DatanodeId: paramtable.GetNodeID(),
State: commonpb.ImportState_ImportStarted,
Segments: make([]int64, 0),
AutoIds: make([]int64, 0),
@ -1014,11 +1019,11 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
zap.Int64("collection ID", req.GetImportTask().GetCollectionId()),
zap.Int64("partition ID", req.GetImportTask().GetPartitionId()),
zap.Int64("task ID", req.GetImportTask().GetTaskId()),
zap.Error(errDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID())))
zap.Error(errDataNodeIsUnhealthy(paramtable.GetNodeID())))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgDataNodeIsUnhealthy(Params.DataNodeCfg.GetNodeID()),
Reason: msgDataNodeIsUnhealthy(paramtable.GetNodeID()),
}, nil
}
@ -1029,7 +1034,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
commonpbutil.WithMsgType(commonpb.MsgType_RequestTSO),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
Count: 1,
})
@ -1135,7 +1140,7 @@ func (node *DataNode) AddImportSegment(ctx context.Context, req *datapb.AddImpor
if err != nil {
log.Error("channel not found in current DataNode",
zap.String("channel name", req.GetChannelName()),
zap.Int64("node ID", Params.DataNodeCfg.GetNodeID()))
zap.Int64("node ID", paramtable.GetNodeID()))
return &datapb.AddImportSegmentResponse{
Status: &commonpb.Status{
// TODO: Add specific error code.
@ -1302,7 +1307,7 @@ func saveSegmentFunc(node *DataNode, req *datapb.ImportTaskRequest, res *rootcoo
resp, err := node.dataCoord.SaveImportSegment(context.Background(), &datapb.SaveImportSegmentRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithTimeStamp(ts), // Pass current timestamp downstream.
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
SegmentId: segmentID,
ChannelName: targetChName,
@ -1314,7 +1319,7 @@ func saveSegmentFunc(node *DataNode, req *datapb.ImportTaskRequest, res *rootcoo
commonpbutil.WithMsgType(0),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(ts),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
SegmentID: segmentID,
CollectionID: req.GetImportTask().GetCollectionId(),


@ -44,6 +44,7 @@ import (
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/importutil"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -93,7 +94,7 @@ func TestDataNode(t *testing.T) {
defer node.Stop()
node.chunkManager = storage.NewLocalChunkManager(storage.RootPath("/tmp/lib/milvus"))
Params.DataNodeCfg.SetNodeID(1)
paramtable.SetNodeID(1)
t.Run("Test WatchDmChannels ", func(t *testing.T) {
emptyNode := &DataNode{}
@ -758,7 +759,7 @@ func TestDataNode_AddSegment(t *testing.T) {
defer node.Stop()
node.chunkManager = storage.NewLocalChunkManager(storage.RootPath("/tmp/lib/milvus"))
Params.DataNodeCfg.SetNodeID(1)
paramtable.SetNodeID(1)
t.Run("test AddSegment", func(t *testing.T) {
node.rootCoord = &RootCoordFactory{
@ -834,15 +835,15 @@ func TestWatchChannel(t *testing.T) {
// GOOSE TODO
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
oldInvalidCh := "datanode-etcd-test-by-dev-rootcoord-dml-channel-invalid"
path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), oldInvalidCh)
path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID(), oldInvalidCh)
err = kv.Save(path, string([]byte{23}))
assert.NoError(t, err)
ch := fmt.Sprintf("datanode-etcd-test-by-dev-rootcoord-dml-channel_%d", rand.Int31())
path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), ch)
path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID(), ch)
c := make(chan struct{})
go func() {
ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID()))
c <- struct{}{}
cnt := 0
for {
@ -881,7 +882,7 @@ func TestWatchChannel(t *testing.T) {
exist := node.flowgraphManager.exist(ch)
assert.True(t, exist)
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID()))
assert.Nil(t, err)
//TODO there is not way to sync Release done, use sleep for now
time.Sleep(100 * time.Millisecond)
@ -893,15 +894,15 @@ func TestWatchChannel(t *testing.T) {
t.Run("Test release channel", func(t *testing.T) {
kv := etcdkv.NewEtcdKV(etcdCli, Params.EtcdCfg.MetaRootPath)
oldInvalidCh := "datanode-etcd-test-by-dev-rootcoord-dml-channel-invalid"
path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), oldInvalidCh)
path := fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID(), oldInvalidCh)
err = kv.Save(path, string([]byte{23}))
assert.NoError(t, err)
ch := fmt.Sprintf("datanode-etcd-test-by-dev-rootcoord-dml-channel_%d", rand.Int31())
path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID(), ch)
path = fmt.Sprintf("%s/%d/%s", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID(), ch)
c := make(chan struct{})
go func() {
ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
ec := kv.WatchWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID()))
c <- struct{}{}
cnt := 0
for {
@ -940,7 +941,7 @@ func TestWatchChannel(t *testing.T) {
exist := node.flowgraphManager.exist(ch)
assert.False(t, exist)
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, Params.DataNodeCfg.GetNodeID()))
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.DataNodeCfg.ChannelWatchSubPath, paramtable.GetNodeID()))
assert.Nil(t, err)
//TODO there is not way to sync Release done, use sleep for now
time.Sleep(100 * time.Millisecond)


@ -35,6 +35,7 @@ import (
"github.com/milvus-io/milvus/internal/util/concurrency"
"github.com/milvus-io/milvus/internal/util/flowgraph"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
)
// dataSyncService controls a flowgraph for a specific collection
@ -139,8 +140,8 @@ func (dsService *dataSyncService) close() {
log.Info("dataSyncService closing flowgraph", zap.Int64("collectionID", dsService.collectionID),
zap.String("vChanName", dsService.vchannelName))
dsService.fg.Close()
metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Dec()
metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Sub(2) // timeTickChannel + deltaChannel
metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Dec()
metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Sub(2) // timeTickChannel + deltaChannel
}
dsService.clearGlobalFlushingCache()
@ -351,7 +352,7 @@ func (dsService *dataSyncService) getSegmentInfos(segmentIDs []int64) ([]*datapb
commonpbutil.WithMsgType(commonpb.MsgType_SegmentInfo),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
SegmentIDs: segmentIDs,
IncludeUnHealthy: true,
@ -376,7 +377,7 @@ func (dsService *dataSyncService) getChannelLatestMsgID(ctx context.Context, cha
}
defer dmlStream.Close()
subName := fmt.Sprintf("datanode-%d-%s-%d", Params.DataNodeCfg.GetNodeID(), channelName, segmentID)
subName := fmt.Sprintf("datanode-%d-%s-%d", paramtable.GetNodeID(), channelName, segmentID)
log.Debug("dataSyncService register consumer for getChannelLatestMsgID",
zap.String("pChannelName", pChannelName),
zap.String("subscription", subName),


@ -38,6 +38,7 @@ import (
"github.com/milvus-io/milvus/internal/util/flowgraph"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/tsoutil"
@ -171,7 +172,7 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
}
rateCol.Add(metricsinfo.InsertConsumeThroughput, float64(proto.Size(&imsg.InsertRequest)))
metrics.DataNodeConsumeCounter.WithLabelValues(strconv.FormatInt(Params.DataNodeCfg.GetNodeID(), 10), metrics.InsertLabel).Add(float64(proto.Size(&imsg.InsertRequest)))
metrics.DataNodeConsumeCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.InsertLabel).Add(float64(proto.Size(&imsg.InsertRequest)))
log.Debug("DDNode receive insert messages",
zap.Int("numRows", len(imsg.GetRowIDs())),
@ -194,7 +195,7 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
continue
}
rateCol.Add(metricsinfo.DeleteConsumeThroughput, float64(proto.Size(&dmsg.DeleteRequest)))
metrics.DataNodeConsumeCounter.WithLabelValues(strconv.FormatInt(Params.DataNodeCfg.GetNodeID(), 10), metrics.DeleteLabel).Add(float64(proto.Size(&dmsg.DeleteRequest)))
metrics.DataNodeConsumeCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.DeleteLabel).Add(float64(proto.Size(&dmsg.DeleteRequest)))
fgMsg.deleteMessages = append(fgMsg.deleteMessages, dmsg)
}
}
@ -289,7 +290,7 @@ func (ddn *ddNode) sendDeltaTimeTick(ts Timestamp) error {
commonpbutil.WithMsgType(commonpb.MsgType_TimeTick),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(ts),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
}
timeTickMsg := &msgstream.TimeTickMsg{
@ -341,7 +342,7 @@ func newDDNode(ctx context.Context, collID UniqueID, vChannelName string, droppe
}
deltaStream.SetRepackFunc(msgstream.DefaultRepackFunc)
deltaStream.AsProducer([]string{deltaChannelName})
metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
log.Info("datanode AsProducer", zap.String("DeltaChannelName", deltaChannelName))
var deltaMsgStream msgstream.MsgStream = deltaStream
deltaMsgStream.Start()


@ -29,6 +29,7 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/trace"
)
@ -236,7 +237,7 @@ func (dn *deleteNode) bufferDeleteMsg(msg *msgstream.DeleteMsg, tr TimeRange) ([
// store
delDataBuf.updateSize(int64(rows))
metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.DeleteLabel).Add(float64(rows))
metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.DeleteLabel).Add(float64(rows))
delDataBuf.updateTimeRange(tr)
dn.delBuf.Store(segID, delDataBuf)
}


@ -29,6 +29,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/flowgraph"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/tsoutil"
)
@ -38,7 +39,7 @@ import (
func newDmInputNode(ctx context.Context, seekPos *internalpb.MsgPosition, dmNodeConfig *nodeConfig) (*flowgraph.InputNode, error) {
// subName should be unique, since pchannelName is shared among several collections
// use vchannel in case of reuse pchannel for same collection
consumeSubName := fmt.Sprintf("%s-%d-%s", Params.CommonCfg.DataNodeSubName, Params.DataNodeCfg.GetNodeID(), dmNodeConfig.vChannelName)
consumeSubName := fmt.Sprintf("%s-%d-%s", Params.CommonCfg.DataNodeSubName, paramtable.GetNodeID(), dmNodeConfig.vChannelName)
insertStream, err := dmNodeConfig.msFactory.NewTtMsgStream(ctx)
if err != nil {
return nil, err
@ -74,7 +75,7 @@ func newDmInputNode(ctx context.Context, seekPos *internalpb.MsgPosition, dmNode
} else {
insertStream.AsConsumer([]string{pchannelName}, consumeSubName, mqwrapper.SubscriptionPositionEarliest)
}
metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
metrics.DataNodeNumConsumers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
log.Info("datanode AsConsumer", zap.String("physical channel", pchannelName), zap.String("subName", consumeSubName), zap.Int64("collection ID", dmNodeConfig.collectionID))
name := fmt.Sprintf("dmInputNode-data-%d-%s", dmNodeConfig.collectionID, dmNodeConfig.vChannelName)


@ -38,6 +38,7 @@ import (
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/tsoutil"
@ -411,11 +412,11 @@ func (ibNode *insertBufferNode) Sync(fgMsg *flowGraphMsg, seg2Upload []UniqueID,
return nil
}, getFlowGraphRetryOpt())
if err != nil {
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.FailLabel).Inc()
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.TotalLabel).Inc()
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
if task.auto {
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.FailLabel).Inc()
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.TotalLabel).Inc()
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
}
err = fmt.Errorf("insertBufferNode flushBufferData failed, err = %s", err)
log.Error(err.Error())
@ -424,11 +425,11 @@ func (ibNode *insertBufferNode) Sync(fgMsg *flowGraphMsg, seg2Upload []UniqueID,
segmentsToSync = append(segmentsToSync, task.segmentID)
ibNode.insertBuffer.Delete(task.segmentID)
ibNode.channel.RollPKstats(task.segmentID, pkStats)
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.SuccessLabel).Inc()
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.TotalLabel).Inc()
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SuccessLabel).Inc()
metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
if task.auto {
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.TotalLabel).Inc()
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.FailLabel).Inc()
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
}
}
return segmentsToSync
@ -551,7 +552,7 @@ func (ibNode *insertBufferNode) bufferInsertMsg(msg *msgstream.InsertMsg, endPos
// update timestamp range
buffer.updateTimeRange(ibNode.getTimestampRange(tsData))
metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Add(float64(len(msg.RowData)))
metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.InsertLabel).Add(float64(len(msg.RowData)))
// store in buffer
ibNode.insertBuffer.Store(currentSegID, buffer)
@ -612,7 +613,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, flushCh <-chan fl
return nil, err
}
wTt.AsProducer([]string{Params.CommonCfg.DataCoordTimeTick})
metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
log.Info("datanode AsProducer", zap.String("TimeTickChannelName", Params.CommonCfg.DataCoordTimeTick))
var wTtMsgStream msgstream.MsgStream = wTt
wTtMsgStream.Start()
@ -639,7 +640,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, flushCh <-chan fl
commonpbutil.WithMsgType(commonpb.MsgType_DataNodeTt),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(ts),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
ChannelName: config.vChannelName,
Timestamp: ts,
@ -649,7 +650,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, flushCh <-chan fl
msgPack.Msgs = append(msgPack.Msgs, &timeTickMsg)
pt, _ := tsoutil.ParseHybridTs(ts)
pChan := funcutil.ToPhysicalChannel(config.vChannelName)
metrics.DataNodeTimeSync.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), pChan).Set(float64(pt))
metrics.DataNodeTimeSync.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), pChan).Set(float64(pt))
return wTtMsgStream.Produce(&msgPack)
})
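The metric-label and MsgBase changes in this file all lean on the paramtable package exposing one process-wide table instead of a per-component `Params` variable. Below is a minimal sketch of that singleton shape, assuming a sync.Once-guarded accessor; the toy `ComponentParam` struct and the `SetNodeID` helper are illustrative stand-ins, not the actual Milvus implementation.

```go
package paramtable

import (
	"sync"
	"sync/atomic"
)

// ComponentParam stands in for the real, much larger parameter table.
type ComponentParam struct {
	initialized bool
}

func (p *ComponentParam) init() { p.initialized = true }

var (
	once   sync.Once
	params ComponentParam
	nodeID int64 // one node ID shared by every role running in this process
)

// Init loads the table exactly once; later calls are cheap no-ops.
func Init() {
	once.Do(func() { params.init() })
}

// Get returns the process-wide table, initializing it on first use.
func Get() *ComponentParam {
	Init()
	return &params
}

// GetNodeID returns the node ID used for metric labels and MsgBase source IDs.
func GetNodeID() int64 {
	return atomic.LoadInt64(&nodeID)
}

// SetNodeID records the node ID once the component's session has registered.
func SetNodeID(id int64) {
	atomic.StoreInt64(&nodeID, id)
}
```

With this shape, `fmt.Sprint(paramtable.GetNodeID())` produces the same label from any component in the process, which is what the replaced metric and `WithSourceID` calls above rely on.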


@ -24,6 +24,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/util/paramtable"
"go.uber.org/zap"
)
@ -55,14 +56,14 @@ func (fm *flowgraphManager) addAndStart(dn *DataNode, vchan *datapb.VchannelInfo
dataSyncService.start()
fm.flowgraphs.Store(vchan.GetChannelName(), dataSyncService)
metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Inc()
metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
return nil
}
func (fm *flowgraphManager) release(vchanName string) {
if fg, loaded := fm.flowgraphs.LoadAndDelete(vchanName); loaded {
fg.(*dataSyncService).close()
metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Dec()
metrics.DataNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Dec()
}
rateCol.removeFlowGraphChannel(vchanName)
}


@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/internal/storage"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/metautil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/samber/lo"
@ -430,7 +431,7 @@ func (m *rendezvousFlushManager) flushBufferData(data *BufferData, segmentID Uni
data: kvs,
}, field2Insert, field2Stats, flushed, dropped, pos)
metrics.DataNodeEncodeBufferLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID())).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.DataNodeEncodeBufferLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Observe(float64(tr.ElapseSpan().Milliseconds()))
return statsBinlogs, nil
}
@ -575,10 +576,10 @@ func (t *flushBufferInsertTask) flushInsertData() error {
if t.ChunkManager != nil && len(t.data) > 0 {
tr := timerecord.NewTimeRecorder("insertData")
err := t.MultiWrite(ctx, t.data)
metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.InsertLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
if err == nil {
for _, d := range t.data {
metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Add(float64(len(d)))
metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.InsertLabel).Add(float64(len(d)))
}
}
return err
@ -598,10 +599,10 @@ func (t *flushBufferDeleteTask) flushDeleteData() error {
if len(t.data) > 0 && t.ChunkManager != nil {
tr := timerecord.NewTimeRecorder("deleteData")
err := t.MultiWrite(ctx, t.data)
metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.DeleteLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.DeleteLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
if err == nil {
for _, d := range t.data {
metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.DeleteLabel).Add(float64(len(d)))
metrics.DataNodeFlushedSize.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.DeleteLabel).Add(float64(len(d)))
}
}
return err
@ -641,7 +642,7 @@ func dropVirtualChannelFunc(dsService *dataSyncService, opts ...retry.Option) fl
commonpbutil.WithMsgType(0), //TODO msg type
commonpbutil.WithMsgID(0), //TODO msg id
commonpbutil.WithTimeStamp(0), //TODO time stamp
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
ChannelName: dsService.vchannelName,
}
@ -794,7 +795,7 @@ func flushNotifyFunc(dsService *dataSyncService, opts ...retry.Option) notifyMet
commonpbutil.WithMsgType(0),
commonpbutil.WithMsgID(0),
commonpbutil.WithTimeStamp(0),
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
SegmentID: pack.segmentID,
CollectionID: dsService.collectionID,


@ -23,6 +23,7 @@ import (
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
@ -63,7 +64,7 @@ func (mService *metaService) getCollectionInfo(ctx context.Context, collID Uniqu
commonpbutil.WithMsgType(commonpb.MsgType_DescribeCollection),
commonpbutil.WithMsgID(0), //GOOSE TODO
commonpbutil.WithTimeStamp(0), //GOOSE TODO
commonpbutil.WithSourceID(Params.DataNodeCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
DbName: "default", // GOOSE TODO
CollectionID: collID,


@ -24,6 +24,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/hardware"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/ratelimitutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -94,7 +95,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
},
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, paramtable.GetNodeID()),
}, nil
}
hardwareMetrics := metricsinfo.HardwareMetrics{
@ -110,7 +111,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
nodeInfos := metricsinfo.DataNodeInfos{
BaseComponentInfos: metricsinfo.BaseComponentInfos{
Name: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, paramtable.GetNodeID()),
HardwareInfos: hardwareMetrics,
SystemInfo: metricsinfo.DeployMetrics{},
CreatedTime: Params.DataNodeCfg.CreatedTime.String(),
@ -134,7 +135,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
Reason: err.Error(),
},
Response: "",
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, paramtable.GetNodeID()),
}, nil
}
@ -144,6 +145,6 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
Reason: "",
},
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, Params.DataNodeCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, paramtable.GetNodeID()),
}, nil
}


@ -40,10 +40,8 @@ import (
"google.golang.org/grpc"
)
var Params paramtable.BaseTable
func TestConnectionManager(t *testing.T) {
Params.Init()
paramtable.Init()
ctx := context.Background()
session := initSession(ctx)
@ -270,17 +268,18 @@ type testIndexNode struct {
}
func initSession(ctx context.Context) *sessionutil.Session {
rootPath, err := Params.Load("etcd.rootPath")
baseTable := paramtable.Get().BaseTable
rootPath, err := baseTable.Load("etcd.rootPath")
if err != nil {
panic(err)
}
subPath, err := Params.Load("etcd.metaSubPath")
subPath, err := baseTable.Load("etcd.metaSubPath")
if err != nil {
panic(err)
}
metaRootPath := rootPath + "/" + subPath
endpoints := Params.LoadWithDefault("etcd.endpoints", paramtable.DefaultEtcdEndpoints)
endpoints := baseTable.LoadWithDefault("etcd.endpoints", paramtable.DefaultEtcdEndpoints)
etcdEndpoints := strings.Split(endpoints, ",")
log.Debug("metaRootPath", zap.Any("metaRootPath", metaRootPath))
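With the package-level `Params` gone from this test file, suites initialize the shared table through `paramtable.Init()` and read raw keys via `paramtable.Get().BaseTable`, exactly as `initSession` does above. A hedged example of a test consuming the singleton the same way; the package clause, test name, and log output are illustrative, while the `etcd.*` keys and `Load` calls mirror the ones used above.

```go
package connectionmanager_test

import (
	"testing"

	"github.com/milvus-io/milvus/internal/util/paramtable"
)

func TestReadEtcdConfigFromSingleton(t *testing.T) {
	paramtable.Init() // idempotent, so every test package may call it

	base := paramtable.Get().BaseTable
	rootPath, err := base.Load("etcd.rootPath")
	if err != nil {
		t.Fatalf("load etcd.rootPath: %v", err)
	}
	subPath, err := base.Load("etcd.metaSubPath")
	if err != nil {
		t.Fatalf("load etcd.metaSubPath: %v", err)
	}
	t.Logf("meta root path: %s/%s", rootPath, subPath)
}
```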


@ -40,7 +40,7 @@ import (
// ClientParams is the parameters of client singleton
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
var _ types.DataCoord = (*Client)(nil)
@ -168,7 +168,7 @@ func (c *Client) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.F
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -223,7 +223,7 @@ func (c *Client) GetSegmentStates(ctx context.Context, req *datapb.GetSegmentSta
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -251,7 +251,7 @@ func (c *Client) GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsert
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -279,7 +279,7 @@ func (c *Client) GetCollectionStatistics(ctx context.Context, req *datapb.GetCol
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -307,7 +307,7 @@ func (c *Client) GetPartitionStatistics(ctx context.Context, req *datapb.GetPart
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -347,7 +347,7 @@ func (c *Client) GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoR
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -380,7 +380,7 @@ func (c *Client) SaveBinlogPaths(ctx context.Context, req *datapb.SaveBinlogPath
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.Call(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -405,7 +405,7 @@ func (c *Client) GetRecoveryInfo(ctx context.Context, req *datapb.GetRecoveryInf
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -432,7 +432,7 @@ func (c *Client) GetFlushedSegments(ctx context.Context, req *datapb.GetFlushedS
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -458,7 +458,7 @@ func (c *Client) GetSegmentsByStates(ctx context.Context, req *datapb.GetSegment
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -477,7 +477,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -497,7 +497,7 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -586,7 +586,7 @@ func (c *Client) DropVirtualChannel(ctx context.Context, req *datapb.DropVirtual
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -605,7 +605,7 @@ func (c *Client) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStat
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -624,7 +624,7 @@ func (c *Client) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*da
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -643,7 +643,7 @@ func (c *Client) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -662,7 +662,7 @@ func (c *Client) AcquireSegmentLock(ctx context.Context, req *datapb.AcquireSegm
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -681,7 +681,7 @@ func (c *Client) ReleaseSegmentLock(ctx context.Context, req *datapb.ReleaseSegm
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -700,7 +700,7 @@ func (c *Client) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSe
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -718,7 +718,7 @@ func (c *Client) UnsetIsImportingState(ctx context.Context, req *datapb.UnsetIsI
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -736,7 +736,7 @@ func (c *Client) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmen
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -755,7 +755,7 @@ func (c *Client) BroadcastAlteredCollection(ctx context.Context, req *milvuspb.A
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {


@ -19,6 +19,7 @@ package grpcdatacoord
import (
"context"
"fmt"
"io"
"net"
"strconv"
@ -87,11 +88,6 @@ func (s *Server) init() error {
closer := trace.InitTracing("datacoord")
s.closer = closer
datacoord.Params.InitOnce()
datacoord.Params.DataCoordCfg.IP = Params.IP
datacoord.Params.DataCoordCfg.Port = Params.Port
datacoord.Params.DataCoordCfg.Address = Params.GetAddress()
etcdCli, err := etcd.GetEtcdClient(&datacoord.Params.EtcdCfg)
if err != nil {
log.Debug("DataCoord connect to etcd failed", zap.Error(err))
@ -99,6 +95,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.dataCoord.SetEtcdClient(etcdCli)
s.dataCoord.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
if s.indexCoord == nil {
var err error


@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/stretchr/testify/assert"
clientv3 "go.etcd.io/etcd/client/v3"
)
@ -86,6 +87,9 @@ func (m *MockDataCoord) Register() error {
return m.regErr
}
func (*MockDataCoord) SetAddress(address string) {
}
func (m *MockDataCoord) SetEtcdClient(etcdClient *clientv3.Client) {
}
@ -232,6 +236,7 @@ func (m *MockDataCoord) CheckHealth(ctx context.Context, req *milvuspb.CheckHeal
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func Test_NewServer(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server := NewServer(ctx, nil)
assert.NotNil(t, server)


@ -34,7 +34,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client is the grpc client for DataNode
type Client struct {
@ -135,7 +135,7 @@ func (c *Client) WatchDmChannels(ctx context.Context, req *datapb.WatchDmChannel
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -164,7 +164,7 @@ func (c *Client) FlushSegments(ctx context.Context, req *datapb.FlushSegmentsReq
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -182,7 +182,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -201,7 +201,7 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -232,7 +232,7 @@ func (c *Client) GetCompactionState(ctx context.Context, req *datapb.CompactionS
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -250,7 +250,7 @@ func (c *Client) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*co
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -267,7 +267,7 @@ func (c *Client) ResendSegmentStats(ctx context.Context, req *datapb.ResendSegme
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -285,7 +285,7 @@ func (c *Client) AddImportSegment(ctx context.Context, req *datapb.AddImportSegm
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.DataNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client datapb.DataNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()


@ -229,9 +229,6 @@ func (s *Server) init() error {
Params.Port = funcutil.GetAvailablePort()
log.Warn("DataNode found available port during init", zap.Int("port", Params.Port))
}
dn.Params.InitOnce()
dn.Params.DataNodeCfg.Port = Params.Port
dn.Params.DataNodeCfg.IP = Params.IP
etcdCli, err := etcd.GetEtcdClient(&dn.Params.EtcdCfg)
if err != nil {
@ -240,6 +237,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.SetEtcdClient(s.etcdCli)
s.datanode.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
closer := trace.InitTracing(fmt.Sprintf("DataNode IP: %s, port: %d", Params.IP, Params.Port))
s.closer = closer
log.Info("DataNode address", zap.String("address", Params.IP+":"+strconv.Itoa(Params.Port)))


@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
clientv3 "go.etcd.io/etcd/client/v3"
@ -80,6 +81,9 @@ func (m *MockDataNode) GetStateCode() commonpb.StateCode {
return m.stateCode
}
func (m *MockDataNode) SetAddress(address string) {
}
func (m *MockDataNode) SetRootCoord(rc types.RootCoord) error {
return m.err
}
@ -201,6 +205,7 @@ func (m *mockRootCoord) Stop() error {
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func Test_NewServer(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.Nil(t, err)


@ -39,7 +39,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client is the grpc client of IndexCoord.
type Client struct {
@ -250,7 +250,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.IndexCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client indexpb.IndexCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -270,7 +270,7 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.IndexCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client indexpb.IndexCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {


@ -31,11 +31,13 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func TestIndexCoordClient(t *testing.T) {
paramtable.Init()
ClientParams.InitOnce(typeutil.IndexCoordRole)
ctx := context.Background()
factory := dependency.NewDefaultFactory(true)
@ -44,9 +46,14 @@ func TestIndexCoordClient(t *testing.T) {
icm := indexcoord.NewIndexCoordMock()
etcdCli, err := etcd.GetEtcdClient(&ClientParams.EtcdCfg)
assert.NoError(t, err)
var address string
icm.CallSetAddress = func(addr string) {
address = addr
}
icm.CallRegister = func() error {
session := sessionutil.NewSession(context.Background(), indexcoord.Params.EtcdCfg.MetaRootPath, etcdCli)
session.Init(typeutil.IndexCoordRole, indexcoord.Params.IndexCoordCfg.Address, true, false)
session.Init(typeutil.IndexCoordRole, address, true, false)
session.Register()
return err
}


@ -18,6 +18,7 @@ package grpcindexcoord
import (
"context"
"fmt"
"io"
"net"
"strconv"
@ -93,10 +94,6 @@ func (s *Server) Run() error {
func (s *Server) init() error {
Params.InitOnce(typeutil.IndexCoordRole)
indexcoord.Params.InitOnce()
indexcoord.Params.IndexCoordCfg.Address = Params.GetAddress()
indexcoord.Params.IndexCoordCfg.Port = Params.Port
closer := trace.InitTracing("IndexCoord")
s.closer = closer
@ -107,9 +104,10 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.indexcoord.SetEtcdClient(s.etcdCli)
s.indexcoord.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
s.loopWg.Add(1)
go s.startGrpcLoop(indexcoord.Params.IndexCoordCfg.Port)
go s.startGrpcLoop(Params.Port)
// wait for grpc IndexCoord loop start
if err := <-s.grpcErrChan; err != nil {
log.Error("IndexCoord", zap.Any("init error", err))


@ -28,10 +28,12 @@ import (
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func TestIndexCoordinateServer(t *testing.T) {
paramtable.Init()
ctx := context.Background()
factory := dependency.NewDefaultFactory(true)
server, err := NewServer(ctx, factory)


@ -35,7 +35,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client is the grpc client of IndexNode.
type Client struct {
@ -189,7 +189,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.IndexNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client indexpb.IndexNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -208,7 +208,7 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.IndexNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client indexpb.IndexNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()


@ -40,6 +40,7 @@ import (
var ParamsGlobal paramtable.ComponentParam
func Test_NewClient(t *testing.T) {
paramtable.Init()
ClientParams.InitOnce(typeutil.IndexNodeRole)
ctx := context.Background()
client, err := NewClient(ctx, "", false)


@ -130,12 +130,8 @@ func (s *Server) init() error {
Params.Port = funcutil.GetAvailablePort()
log.Warn("IndexNode get available port when init", zap.Int("Port", Params.Port))
}
indexnode.Params.InitOnce()
indexnode.Params.IndexNodeCfg.Port = Params.Port
indexnode.Params.IndexNodeCfg.IP = Params.IP
indexnode.Params.IndexNodeCfg.Address = Params.GetAddress()
closer := trace.InitTracing(fmt.Sprintf("IndexNode-%d", indexnode.Params.IndexNodeCfg.GetNodeID()))
closer := trace.InitTracing(fmt.Sprintf("IndexNode-%d", paramtable.GetNodeID()))
s.closer = closer
defer func() {
@ -163,6 +159,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.indexnode.SetEtcdClient(etcdCli)
s.indexnode.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
err = s.indexnode.Init()
if err != nil {
log.Error("IndexNode Init failed", zap.Error(err))


@ -36,6 +36,7 @@ import (
var ParamsGlobal paramtable.ComponentParam
func TestIndexNodeServer(t *testing.T) {
paramtable.Init()
ctx := context.Background()
factory := dependency.NewDefaultFactory(true)
server, err := NewServer(ctx, factory)


@ -34,7 +34,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client is the grpc client for Proxy
type Client struct {
@ -130,7 +130,7 @@ func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.ProxyCfg.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -148,7 +148,7 @@ func (c *Client) InvalidateCredentialCache(ctx context.Context, req *proxypb.Inv
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.ProxyCfg.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -166,7 +166,7 @@ func (c *Client) UpdateCredentialCache(ctx context.Context, req *proxypb.UpdateC
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.ProxyCfg.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -184,7 +184,7 @@ func (c *Client) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.Refres
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.ProxyCfg.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -204,7 +204,7 @@ func (c *Client) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetricsRe
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.ProxyCfg.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -223,7 +223,7 @@ func (c *Client) SetRates(ctx context.Context, req *proxypb.SetRatesRequest) (*c
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.ProxyCfg.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.grpcClient.GetNodeID())),
)
ret, err := c.grpcClient.ReCall(ctx, func(client proxypb.ProxyClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {


@ -316,8 +316,6 @@ func (s *Server) init() error {
log.Warn("Proxy get available port when init", zap.Int("Port", Params.Port))
}
proxy.Params.InitOnce()
proxy.Params.ProxyCfg.NetworkAddress = Params.GetInternalAddress()
log.Debug("init Proxy's parameter table done", zap.String("internal address", Params.GetInternalAddress()), zap.String("external address", Params.GetAddress()))
serviceName := fmt.Sprintf("Proxy ip: %s, port: %d", Params.IP, Params.Port)
@ -332,6 +330,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.proxy.SetEtcdClient(s.etcdCli)
s.proxy.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
errChan := make(chan error, 1)
{


@ -29,6 +29,7 @@ import (
"time"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"go.uber.org/zap"
"google.golang.org/grpc"
@ -823,6 +824,9 @@ func (m *MockProxy) UpdateStateCode(stateCode commonpb.StateCode) {
}
func (m *MockProxy) SetAddress(address string) {
}
func (m *MockProxy) SetEtcdClient(etcdClient *clientv3.Client) {
}
@ -1037,6 +1041,7 @@ func runAndWaitForServerReady(server *Server) error {
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func Test_NewServer(t *testing.T) {
paramtable.Init()
ctx := context.Background()
server, err := NewServer(ctx, nil)
assert.NotNil(t, server)


@ -38,7 +38,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client is the grpc client of QueryCoord.
type Client struct {
@ -163,7 +163,7 @@ func (c *Client) ShowCollections(ctx context.Context, req *querypb.ShowCollectio
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -182,7 +182,7 @@ func (c *Client) LoadCollection(ctx context.Context, req *querypb.LoadCollection
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -201,7 +201,7 @@ func (c *Client) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -220,7 +220,7 @@ func (c *Client) ShowPartitions(ctx context.Context, req *querypb.ShowPartitions
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -239,7 +239,7 @@ func (c *Client) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -258,7 +258,7 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -277,7 +277,7 @@ func (c *Client) GetPartitionStates(ctx context.Context, req *querypb.GetPartiti
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -296,7 +296,7 @@ func (c *Client) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfo
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -315,7 +315,7 @@ func (c *Client) LoadBalance(ctx context.Context, req *querypb.LoadBalanceReques
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -334,7 +334,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -354,7 +354,7 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -373,7 +373,7 @@ func (c *Client) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReque
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -392,7 +392,7 @@ func (c *Client) GetShardLeaders(ctx context.Context, req *querypb.GetShardLeade
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {


@ -18,6 +18,7 @@ package grpcquerycoord
import (
"context"
"fmt"
"io"
"net"
"strconv"
@ -111,10 +112,6 @@ func (s *Server) Run() error {
func (s *Server) init() error {
Params.InitOnce(typeutil.QueryCoordRole)
qc.Params.InitOnce()
qc.Params.QueryCoordCfg.Address = Params.GetAddress()
qc.Params.QueryCoordCfg.Port = Params.Port
closer := trace.InitTracing("querycoord")
s.closer = closer
@ -125,6 +122,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.SetEtcdClient(etcdCli)
s.queryCoord.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
s.wg.Add(1)
go s.startGrpcLoop(Params.Port)


@ -72,6 +72,9 @@ func (m *MockQueryCoord) Register() error {
func (m *MockQueryCoord) UpdateStateCode(code commonpb.StateCode) {
}
func (m *MockQueryCoord) SetAddress(address string) {
}
func (m *MockQueryCoord) SetEtcdClient(client *clientv3.Client) {
}


@ -35,7 +35,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client is the grpc client of QueryNode.
type Client struct {
@ -146,7 +146,7 @@ func (c *Client) WatchDmChannels(ctx context.Context, req *querypb.WatchDmChanne
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -164,7 +164,7 @@ func (c *Client) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmChannel
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -182,7 +182,7 @@ func (c *Client) LoadSegments(ctx context.Context, req *querypb.LoadSegmentsRequ
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -200,7 +200,7 @@ func (c *Client) ReleaseCollection(ctx context.Context, req *querypb.ReleaseColl
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -218,7 +218,7 @@ func (c *Client) ReleasePartitions(ctx context.Context, req *querypb.ReleasePart
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -236,7 +236,7 @@ func (c *Client) ReleaseSegments(ctx context.Context, req *querypb.ReleaseSegmen
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -282,7 +282,7 @@ func (c *Client) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfo
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -300,7 +300,7 @@ func (c *Client) SyncReplicaSegments(ctx context.Context, req *querypb.SyncRepli
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -318,7 +318,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -337,7 +337,7 @@ func (c *Client) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.ReCall(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -367,7 +367,7 @@ func (c *Client) GetDataDistribution(ctx context.Context, req *querypb.GetDataDi
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.Call(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
@ -384,7 +384,7 @@ func (c *Client) SyncDistribution(ctx context.Context, req *querypb.SyncDistribu
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.QueryNodeCfg.GetNodeID()))
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID()))
ret, err := c.grpcClient.Call(ctx, func(client querypb.QueryNodeClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()


@ -23,12 +23,14 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/mock"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
)
func Test_NewClient(t *testing.T) {
paramtable.Init()
ClientParams.InitOnce(typeutil.QueryNodeRole)
ctx := context.Background()


@ -95,11 +95,6 @@ func (s *Server) init() error {
log.Warn("QueryNode get available port when init", zap.Int("Port", Params.Port))
}
qn.Params.InitOnce()
qn.Params.QueryNodeCfg.QueryNodeIP = Params.IP
qn.Params.QueryNodeCfg.QueryNodePort = int64(Params.Port)
//qn.Params.QueryNodeID = Params.QueryNodeID
closer := trace.InitTracing(fmt.Sprintf("query_node ip: %s, port: %d", Params.IP, Params.Port))
s.closer = closer
@ -112,6 +107,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.SetEtcdClient(etcdCli)
s.querynode.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
log.Debug("QueryNode connect to etcd successfully")
s.wg.Add(1)
go s.startGrpcLoop(Params.Port)
@ -162,7 +158,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
addr := ":" + strconv.Itoa(grpcPort)
lis, err = net.Listen("tcp", addr)
if err == nil {
qn.Params.QueryNodeCfg.QueryNodePort = int64(lis.Addr().(*net.TCPAddr).Port)
s.querynode.SetAddress(fmt.Sprintf("%s:%d", Params.IP, lis.Addr().(*net.TCPAddr).Port))
} else {
// set port=0 to get next available port
grpcPort = 0


@ -123,6 +123,9 @@ func (m *MockQueryNode) SyncReplicaSegments(ctx context.Context, req *querypb.Sy
return m.status, m.err
}
func (m *MockQueryNode) SetAddress(address string) {
}
func (m *MockQueryNode) SetEtcdClient(client *clientv3.Client) {
}


@ -39,7 +39,7 @@ import (
var ClientParams paramtable.GrpcClientConfig
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// Client grpc client
type Client struct {
@ -169,7 +169,7 @@ func (c *Client) CreateCollection(ctx context.Context, in *milvuspb.CreateCollec
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -188,7 +188,7 @@ func (c *Client) DropCollection(ctx context.Context, in *milvuspb.DropCollection
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -207,7 +207,7 @@ func (c *Client) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRe
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -226,7 +226,7 @@ func (c *Client) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCo
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -245,7 +245,7 @@ func (c *Client) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectio
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -263,7 +263,7 @@ func (c *Client) AlterCollection(ctx context.Context, request *milvuspb.AlterCol
request = typeutil.Clone(request)
commonpbutil.UpdateMsgBase(
request.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -282,7 +282,7 @@ func (c *Client) CreatePartition(ctx context.Context, in *milvuspb.CreatePartiti
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -301,7 +301,7 @@ func (c *Client) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRe
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -320,7 +320,7 @@ func (c *Client) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequ
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -339,7 +339,7 @@ func (c *Client) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitions
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -358,7 +358,7 @@ func (c *Client) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimest
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -377,7 +377,7 @@ func (c *Client) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -396,7 +396,7 @@ func (c *Client) UpdateChannelTimeTick(ctx context.Context, in *internalpb.Chann
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -415,7 +415,7 @@ func (c *Client) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequ
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -434,7 +434,7 @@ func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -453,7 +453,7 @@ func (c *Client) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -473,7 +473,7 @@ func (c *Client) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest)
in = typeutil.Clone(in)
commonpbutil.UpdateMsgBase(
in.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -492,7 +492,7 @@ func (c *Client) CreateAlias(ctx context.Context, req *milvuspb.CreateAliasReque
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -511,7 +511,7 @@ func (c *Client) DropAlias(ctx context.Context, req *milvuspb.DropAliasRequest)
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -530,7 +530,7 @@ func (c *Client) AlterAlias(ctx context.Context, req *milvuspb.AlterAliasRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -617,7 +617,7 @@ func (c *Client) GetCredential(ctx context.Context, req *rootcoordpb.GetCredenti
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -648,7 +648,7 @@ func (c *Client) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCrede
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -666,7 +666,7 @@ func (c *Client) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUsersR
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -684,7 +684,7 @@ func (c *Client) CreateRole(ctx context.Context, req *milvuspb.CreateRoleRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -702,7 +702,7 @@ func (c *Client) DropRole(ctx context.Context, req *milvuspb.DropRoleRequest) (*
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -720,7 +720,7 @@ func (c *Client) OperateUserRole(ctx context.Context, req *milvuspb.OperateUserR
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -738,7 +738,7 @@ func (c *Client) SelectRole(ctx context.Context, req *milvuspb.SelectRoleRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -756,7 +756,7 @@ func (c *Client) SelectUser(ctx context.Context, req *milvuspb.SelectUserRequest
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -774,7 +774,7 @@ func (c *Client) OperatePrivilege(ctx context.Context, req *milvuspb.OperatePriv
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -792,7 +792,7 @@ func (c *Client) SelectGrant(ctx context.Context, req *milvuspb.SelectGrantReque
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
@ -810,7 +810,7 @@ func (c *Client) ListPolicy(ctx context.Context, req *internalpb.ListPolicyReque
req = typeutil.Clone(req)
commonpbutil.UpdateMsgBase(
req.GetBase(),
commonpbutil.FillMsgBaseFromClient(Params.RootCoordCfg.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
commonpbutil.FillMsgBaseFromClient(paramtable.GetNodeID(), commonpbutil.WithTargetID(c.sess.ServerID)),
)
ret, err := c.grpcClient.ReCall(ctx, func(client rootcoordpb.RootCoordClient) (any, error) {
if !funcutil.CheckCtxValid(ctx) {
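Every wrapper in this client follows the same shape: clone the request, stamp its MsgBase with the process-wide node ID from paramtable.GetNodeID() plus the target server ID, then retry the call. A minimal, self-contained sketch of that kind of shared node-ID accessor is below; the names only approximate the real paramtable package that this commit turns into a singleton.

package paramtable // illustrative sketch, not the actual Milvus package

import "sync/atomic"

// nodeID is the single process-wide server ID, set after session
// registration and read by every client wrapper shown above.
var nodeID atomic.Int64

// SetNodeID records the ID assigned to this process (compare the
// paramtable.SetNodeID(session.ServerID) calls later in this diff).
func SetNodeID(id int64) { nodeID.Store(id) }

// GetNodeID returns the shared ID used to fill the MsgBase source field.
func GetNodeID() int64 { return nodeID.Load() }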

View File

@ -18,6 +18,7 @@ package grpcrootcoord
import (
"context"
"fmt"
"io"
"net"
"strconv"
@ -153,10 +154,6 @@ func (s *Server) Run() error {
func (s *Server) init() error {
Params.InitOnce(typeutil.RootCoordRole)
rootcoord.Params.InitOnce()
rootcoord.Params.RootCoordCfg.Address = Params.GetAddress()
rootcoord.Params.RootCoordCfg.Port = Params.Port
log.Debug("init params done..")
closer := trace.InitTracing("root_coord")
@ -169,6 +166,7 @@ func (s *Server) init() error {
}
s.etcdCli = etcdCli
s.rootCoord.SetEtcdClient(s.etcdCli)
s.rootCoord.SetAddress(fmt.Sprintf("%s:%d", Params.IP, Params.Port))
log.Debug("etcd connect done ...")
err = s.startGrpc(Params.Port)
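The server no longer copies its address and port into rootcoord.Params; it computes the listen address itself and hands it to the coordinator through the new SetAddress setter. A small runnable sketch of that injection, with stand-in types rather than the real rootcoord interfaces:

package main

import "fmt"

// coordinator stands in for the embedded rootcoord component.
type coordinator struct {
	address string
}

// SetAddress mirrors the setter added in this commit.
func (c *coordinator) SetAddress(addr string) { c.address = addr }

func main() {
	ip, port := "127.0.0.1", 53100 // stand-ins for Params.IP and Params.Port
	c := &coordinator{}
	c.SetAddress(fmt.Sprintf("%s:%d", ip, port)) // injected, not written into a config struct
	fmt.Println(c.address)
}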

View File

@ -34,6 +34,7 @@ import (
"github.com/milvus-io/milvus/internal/rootcoord"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -60,6 +61,9 @@ func (m *mockCore) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthReq
func (m *mockCore) UpdateStateCode(commonpb.StateCode) {
}
func (m *mockCore) SetAddress(address string) {
}
func (m *mockCore) SetEtcdClient(etcdClient *clientv3.Client) {
}
@ -148,6 +152,7 @@ func (m *mockQuery) Stop() error {
}
func TestRun(t *testing.T) {
paramtable.Init()
ctx, cancel := context.WithCancel(context.Background())
svr := Server{
rootCoord: &mockCore{},

View File

@ -66,7 +66,7 @@ import (
// make sure IndexCoord implements types.IndexCoord
var _ types.IndexCoord = (*IndexCoord)(nil)
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// IndexCoord is a component responsible for scheduling index construction segments and maintaining index status.
// IndexCoord accepts requests from rootcoord to build indexes, delete indexes, and query index information.
@ -87,6 +87,7 @@ type IndexCoord struct {
factory dependency.Factory
etcdCli *clientv3.Client
address string
etcdKV kv.MetaKv
chunkManager storage.ChunkManager
@ -161,7 +162,7 @@ func (i *IndexCoord) initSession() error {
if i.session == nil {
return errors.New("failed to initialize session")
}
i.session.Init(typeutil.IndexCoordRole, Params.IndexCoordCfg.Address, true, true)
i.session.Init(typeutil.IndexCoordRole, i.address, true, true)
i.session.SetEnableActiveStandBy(i.enableActiveStandBy)
Params.SetLogger(i.session.ServerID)
i.serverID = i.session.ServerID
@ -171,12 +172,11 @@ func (i *IndexCoord) initSession() error {
// Init initializes the IndexCoord component.
func (i *IndexCoord) Init() error {
var initErr error
Params.InitOnce()
i.initOnce.Do(func() {
i.UpdateStateCode(commonpb.StateCode_Initializing)
log.Debug("IndexCoord init", zap.Any("stateCode", i.stateCode.Load().(commonpb.StateCode)))
i.factory.Init(&Params)
i.factory.Init(Params)
err := i.initSession()
if err != nil {
@ -350,6 +350,10 @@ func (i *IndexCoord) Stop() error {
return nil
}
func (i *IndexCoord) SetAddress(address string) {
i.address = address
}
func (i *IndexCoord) SetEtcdClient(etcdClient *clientv3.Client) {
i.etcdCli = etcdClient
}
@ -913,12 +917,12 @@ func (i *IndexCoord) ShowConfigurations(ctx context.Context, req *internalpb.Sho
log.Warn("IndexCoord.ShowConfigurations failed",
zap.Int64("nodeId", i.serverID),
zap.String("req", req.Pattern),
zap.Error(errIndexCoordIsUnhealthy(Params.QueryNodeCfg.GetNodeID())))
zap.Error(errIndexCoordIsUnhealthy(paramtable.GetNodeID())))
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgIndexCoordIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
Reason: msgIndexCoordIsUnhealthy(paramtable.GetNodeID()),
},
Configuations: nil,
}, nil
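IndexCoord now binds its package-level Params to the shared table via paramtable.Get() instead of owning a private ComponentParam, and Init() no longer calls Params.InitOnce(). A hedged sketch of what such a Get/Init pair can look like, assuming a sync.Once-guarded loader (the real implementation may differ):

package paramtable // illustrative sketch of the singleton accessor

import "sync"

// ComponentParam is a stand-in for the real configuration aggregate.
type ComponentParam struct{}

var (
	instance *ComponentParam
	initOnce sync.Once
)

// Init loads the shared configuration exactly once, no matter how many
// components or tests call it.
func Init() {
	initOnce.Do(func() {
		p := &ComponentParam{}
		// real code would read YAML and environment sources into p here
		instance = p
	})
}

// Get returns the process-wide instance, so declarations like
// var Params *paramtable.ComponentParam = paramtable.Get() in
// IndexCoord, IndexNode, and Proxy all share one table.
func Get() *ComponentParam {
	Init()
	return instance
}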

View File

@ -47,6 +47,7 @@ type Mock struct {
CallGetStatisticsChannel func(ctx context.Context) (*milvuspb.StringResponse, error)
CallRegister func() error
CallSetAddress func(address string)
CallSetEtcdClient func(etcdClient *clientv3.Client)
CallSetDataCoord func(dataCoord types.DataCoord) error
CallSetRootCoord func(rootCoord types.RootCoord) error
@ -83,6 +84,10 @@ func (m *Mock) Register() error {
return m.CallRegister()
}
func (m *Mock) SetAddress(address string) {
m.CallSetAddress(address)
}
func (m *Mock) SetEtcdClient(client *clientv3.Client) {
m.CallSetEtcdClient(client)
}
@ -165,6 +170,8 @@ func NewIndexCoordMock() *Mock {
CallStop: func() error {
return nil
},
CallSetAddress: func(address string) {
},
CallSetEtcdClient: func(etcdClient *clientv3.Client) {
},
CallSetDataCoord: func(dataCoord types.DataCoord) error {

View File

@ -24,7 +24,7 @@ func (m *chunkMgr) NewChunkManager(ctx context.Context, config *indexpb.StorageC
return v.(storage.ChunkManager), nil
}
chunkManagerFactory := storage.NewChunkManagerFactoryWithParam(&Params)
chunkManagerFactory := storage.NewChunkManagerFactoryWithParam(Params)
mgr, err := chunkManagerFactory.NewPersistentStorageChunkManager(ctx)
if err != nil {
return nil, err

View File

@ -32,7 +32,6 @@ import (
"math/rand"
"os"
"path"
"strconv"
"sync"
"sync/atomic"
"syscall"
@ -68,7 +67,7 @@ var _ types.IndexNode = (*IndexNode)(nil)
var _ types.IndexNodeComponent = (*IndexNode)(nil)
// Params is a GlobalParamTable singleton of indexnode
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
type taskKey struct {
ClusterID string
@ -91,6 +90,7 @@ type IndexNode struct {
session *sessionutil.Session
etcdCli *clientv3.Client
address string
closer io.Closer
@ -148,9 +148,7 @@ func (i *IndexNode) initKnowhere() {
// override index builder SIMD type
cSimdType := C.CString(Params.CommonCfg.SimdType)
cRealSimdType := C.IndexBuilderSetSimdType(cSimdType)
Params.CommonCfg.SimdType = C.GoString(cRealSimdType)
C.free(unsafe.Pointer(cRealSimdType))
C.IndexBuilderSetSimdType(cSimdType)
C.free(unsafe.Pointer(cSimdType))
// override segcore index slice size
@ -163,7 +161,7 @@ func (i *IndexNode) initKnowhere() {
cCpuNum := C.int(hardware.GetCPUNum())
C.InitCpuNum(cCpuNum)
initcore.InitLocalStorageConfig(&Params)
initcore.InitLocalStorageConfig(Params)
}
func (i *IndexNode) initSession() error {
@ -171,18 +169,16 @@ func (i *IndexNode) initSession() error {
if i.session == nil {
return errors.New("failed to initialize session")
}
i.session.Init(typeutil.IndexNodeRole, Params.IndexNodeCfg.IP+":"+strconv.Itoa(Params.IndexNodeCfg.Port), false, true)
Params.IndexNodeCfg.SetNodeID(i.session.ServerID)
i.session.Init(typeutil.IndexNodeRole, i.address, false, true)
paramtable.SetNodeID(i.session.ServerID)
Params.SetLogger(i.session.ServerID)
return nil
}
// Init initializes the IndexNode component.
func (i *IndexNode) Init() error {
var initErr error = nil
var initErr error
i.initOnce.Do(func() {
Params.Init()
i.UpdateStateCode(commonpb.StateCode_Initializing)
log.Debug("IndexNode init", zap.Any("State", i.stateCode.Load().(commonpb.StateCode)))
err := i.initSession()
@ -212,7 +208,7 @@ func (i *IndexNode) Init() error {
// Start starts the IndexNode component.
func (i *IndexNode) Start() error {
var startErr error = nil
var startErr error
i.once.Do(func() {
startErr = i.sched.Start()
@ -283,7 +279,7 @@ func (i *IndexNode) isHealthy() bool {
// sp, ctx2 := trace.StartSpanFromContextWithOperationName(i.loopCtx, "IndexNode-CreateIndex")
// defer sp.Finish()
// sp.SetTag("IndexBuildID", strconv.FormatInt(request.IndexBuildID, 10))
// metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.TotalLabel).Inc()
// metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.TotalLabel).Inc()
//
// t := &IndexBuildTask{
// BaseTask: BaseTask{
@ -293,7 +289,7 @@ func (i *IndexNode) isHealthy() bool {
// req: request,
// cm: i.chunkManager,
// etcdKV: i.etcdKV,
// nodeID: Params.IndexNodeCfg.GetNodeID(),
// nodeID: paramtable.GetNodeID(),
// serializedSize: 0,
// }
//
@ -306,12 +302,12 @@ func (i *IndexNode) isHealthy() bool {
// log.Warn("IndexNode failed to schedule", zap.Int64("indexBuildID", request.IndexBuildID), zap.Error(err))
// ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
// ret.Reason = err.Error()
// metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.FailLabel).Inc()
// metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.FailLabel).Inc()
// return ret, nil
// }
// log.Info("IndexNode successfully scheduled", zap.Int64("indexBuildID", request.IndexBuildID))
//
// metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.SuccessLabel).Inc()
// metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.SuccessLabel).Inc()
// return ret, nil
//}
//
@ -389,21 +385,21 @@ func (i *IndexNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringR
}
func (i *IndexNode) GetNodeID() int64 {
return Params.IndexNodeCfg.GetNodeID()
return paramtable.GetNodeID()
}
//ShowConfigurations returns the configurations of indexNode matching req.Pattern
func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
if !i.isHealthy() {
log.Warn("IndexNode.ShowConfigurations failed",
zap.Int64("nodeId", Params.IndexNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Pattern),
zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID())))
zap.Error(errIndexNodeIsUnhealthy(paramtable.GetNodeID())))
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID()),
Reason: msgIndexNodeIsUnhealthy(paramtable.GetNodeID()),
},
Configuations: nil,
}, nil
@ -412,19 +408,23 @@ func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.Show
return getComponentConfigurations(ctx, req), nil
}
func (i *IndexNode) SetAddress(address string) {
i.address = address
}
//// GetMetrics gets the metrics info of IndexNode.
//// TODO(dragondriver): cache the Metrics and set a retention to the cache
//func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
// if !i.isHealthy() {
// log.Warn("IndexNode.GetMetrics failed",
// zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
// zap.Int64("node_id", paramtable.GetNodeID()),
// zap.String("req", req.Request),
// zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID())))
// zap.Error(errIndexNodeIsUnhealthy(paramtable.GetNodeID())))
//
// return &milvuspb.GetMetricsResponse{
// Status: &commonpb.Status{
// ErrorCode: commonpb.ErrorCode_UnexpectedError,
// Reason: msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID()),
// Reason: msgIndexNodeIsUnhealthy(paramtable.GetNodeID()),
// },
// Response: "",
// }, nil
@ -433,7 +433,7 @@ func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.Show
// metricType, err := metricsinfo.ParseMetricType(req.Request)
// if err != nil {
// log.Warn("IndexNode.GetMetrics failed to parse metric type",
// zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
// zap.Int64("node_id", paramtable.GetNodeID()),
// zap.String("req", req.Request),
// zap.Error(err))
//
@ -450,7 +450,7 @@ func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.Show
// metrics, err := getSystemInfoMetrics(ctx, req, i)
//
// log.Debug("IndexNode.GetMetrics",
// zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
// zap.Int64("node_id", paramtable.GetNodeID()),
// zap.String("req", req.Request),
// zap.String("metric_type", metricType),
// zap.Error(err))
@ -459,7 +459,7 @@ func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.Show
// }
//
// log.Warn("IndexNode.GetMetrics failed, request metric type is not implemented yet",
// zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
// zap.Int64("node_id", paramtable.GetNodeID()),
// zap.String("req", req.Request),
// zap.String("metric_type", metricType))
//

View File

@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/hardware"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -42,6 +43,7 @@ type Mock struct {
CallGetStatisticsChannel func(ctx context.Context) (*milvuspb.StringResponse, error)
CallRegister func() error
CallSetAddress func(address string)
CallSetEtcdClient func(etcdClient *clientv3.Client)
CallUpdateStateCode func(stateCode commonpb.StateCode)
@ -68,6 +70,8 @@ func NewIndexNodeMock() *Mock {
CallStop: func() error {
return nil
},
CallSetAddress: func(address string) {
},
CallSetEtcdClient: func(etcdClient *clientv3.Client) {
},
CallUpdateStateCode: func(stateCode commonpb.StateCode) {
@ -176,6 +180,9 @@ func (m *Mock) Register() error {
return m.CallRegister()
}
func (m *Mock) SetAddress(address string) {
m.CallSetAddress(address)
}
func (m *Mock) SetEtcdClient(etcdClient *clientv3.Client) {
}
@ -215,7 +222,7 @@ func getMockSystemInfoMetrics(
// TODO(dragondriver): add more metrics
nodeInfos := metricsinfo.IndexNodeInfos{
BaseComponentInfos: metricsinfo.BaseComponentInfos{
Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
HardwareInfos: metricsinfo.HardwareMetrics{
CPUCoreCount: hardware.GetCPUNum(),
CPUCoreUsage: hardware.GetCPUUsage(),
@ -245,6 +252,6 @@ func getMockSystemInfoMetrics(
Reason: "",
},
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
}, nil
}
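The mocks gain CallSetAddress hooks in the same function-field style as the rest of the file, so tests can observe the address that the grpc layer injects. A tiny self-contained sketch of that mocking style (names are illustrative):

package main

import "fmt"

type indexNodeMock struct {
	CallSetAddress func(address string)
}

// SetAddress simply forwards to the configurable hook, as the Mock does.
func (m *indexNodeMock) SetAddress(address string) { m.CallSetAddress(address) }

func main() {
	var got string
	m := &indexNodeMock{CallSetAddress: func(addr string) { got = addr }}
	m.SetAddress("127.0.0.1:21121")
	fmt.Println(got) // the test can assert the injected address
}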

View File

@ -15,6 +15,7 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
)
@ -43,7 +44,7 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
defer sp.Finish()
sp.SetTag("IndexBuildID", strconv.FormatInt(req.BuildID, 10))
sp.SetTag("ClusterID", req.ClusterID)
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.TotalLabel).Inc()
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.TotalLabel).Inc()
taskCtx, taskCancel := context.WithCancel(i.loopCtx)
if oldInfo := i.loadOrStoreTask(req.ClusterID, req.BuildID, &taskInfo{
@ -86,7 +87,7 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
log.Ctx(ctx).Warn("IndexNode failed to schedule", zap.Int64("IndexBuildID", req.BuildID), zap.String("ClusterID", req.ClusterID), zap.Error(err))
ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
ret.Reason = err.Error()
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10), metrics.FailLabel).Inc()
metrics.IndexNodeBuildIndexTaskCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.FailLabel).Inc()
return ret, nil
}
log.Ctx(ctx).Info("IndexNode successfully scheduled", zap.Int64("IndexBuildID", req.BuildID), zap.String("ClusterID", req.ClusterID), zap.String("indexName", req.IndexName))
@ -213,14 +214,14 @@ func (i *IndexNode) GetJobStats(ctx context.Context, req *indexpb.GetJobStatsReq
func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
if !i.isHealthy() {
log.Ctx(ctx).Warn("IndexNode.GetMetrics failed",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(errIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID())))
zap.Error(errIndexNodeIsUnhealthy(paramtable.GetNodeID())))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgIndexNodeIsUnhealthy(Params.IndexNodeCfg.GetNodeID()),
Reason: msgIndexNodeIsUnhealthy(paramtable.GetNodeID()),
},
Response: "",
}, nil
@ -229,7 +230,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
metricType, err := metricsinfo.ParseMetricType(req.Request)
if err != nil {
log.Ctx(ctx).Warn("IndexNode.GetMetrics failed to parse metric type",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(err))
@ -246,7 +247,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
metrics, err := getSystemInfoMetrics(ctx, req, i)
log.Ctx(ctx).Debug("IndexNode.GetMetrics",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType),
zap.Error(err))
@ -255,7 +256,7 @@ func (i *IndexNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequ
}
log.Ctx(ctx).Warn("IndexNode.GetMetrics failed, request metric type is not implemented yet",
zap.Int64("node_id", Params.IndexNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType))

View File

@ -24,6 +24,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/hardware"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -58,7 +59,7 @@ func getSystemInfoMetrics(
// TODO(dragondriver): add more metrics
nodeInfos := metricsinfo.IndexNodeInfos{
BaseComponentInfos: metricsinfo.BaseComponentInfos{
Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
HardwareInfos: metricsinfo.HardwareMetrics{
IP: node.session.Address,
CPUCoreCount: hardware.GetCPUNum(),
@ -90,7 +91,7 @@ func getSystemInfoMetrics(
Reason: err.Error(),
},
Response: "",
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
}, nil
}
@ -100,6 +101,6 @@ func getSystemInfoMetrics(
Reason: "",
},
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, Params.IndexNodeCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
}, nil
}

View File

@ -40,6 +40,7 @@ import (
"github.com/milvus-io/milvus/internal/util/indexparams"
"github.com/milvus-io/milvus/internal/util/logutil"
"github.com/milvus-io/milvus/internal/util/metautil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/timerecord"
)
@ -221,7 +222,7 @@ func (it *indexBuildTask) LoadData(ctx context.Context) error {
}
loadFieldDataLatency := it.tr.CtxRecord(ctx, "load field data done")
metrics.IndexNodeLoadFieldLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(loadFieldDataLatency.Milliseconds()))
metrics.IndexNodeLoadFieldLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(loadFieldDataLatency.Milliseconds()))
err = it.decodeBlobs(ctx, blobs)
if err != nil {
@ -257,7 +258,7 @@ func (it *indexBuildTask) BuildIndex(ctx context.Context) error {
}
buildIndexLatency := it.tr.Record("build index done")
metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(buildIndexLatency.Milliseconds()))
metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(buildIndexLatency.Milliseconds()))
indexBlobs, err := it.index.Serialize()
if err != nil {
@ -295,7 +296,7 @@ func (it *indexBuildTask) BuildIndex(ctx context.Context) error {
return err
}
encodeIndexFileDur := it.tr.Record("index codec serialize done")
metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
it.indexBlobs = serializedIndexBlobs
logutil.Logger(ctx).Info("Successfully build index", zap.Int64("buildID", it.BuildID), zap.Int64("Collection", it.collectionID), zap.Int64("SegmentID", it.segmentID))
return nil
@ -372,7 +373,7 @@ func (it *indexBuildTask) BuildDiskAnnIndex(ctx context.Context) error {
}
buildIndexLatency := it.tr.Record("build index done")
metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(buildIndexLatency.Milliseconds()))
metrics.IndexNodeKnowhereBuildIndexLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(buildIndexLatency.Milliseconds()))
fileInfos, err := it.index.GetIndexFileInfo()
if err != nil {
@ -397,7 +398,7 @@ func (it *indexBuildTask) BuildDiskAnnIndex(ctx context.Context) error {
}
encodeIndexFileDur := it.tr.Record("index codec serialize done")
metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
metrics.IndexNodeEncodeIndexFileLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(encodeIndexFileDur.Milliseconds()))
return nil
}
@ -438,7 +439,7 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, saveFileKeys, it.serializedSize, &it.statistic)
log.Ctx(ctx).Debug("save index files done", zap.Strings("IndexFiles", savePaths))
saveIndexFileDur := it.tr.Record("index file save done")
metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
it.tr.Elapse("index building all done")
log.Ctx(ctx).Info("Successfully save index files", zap.Int64("buildID", it.BuildID), zap.Int64("Collection", it.collectionID),
zap.Int64("partition", it.partitionID), zap.Int64("SegmentId", it.segmentID))
@ -498,7 +499,7 @@ func (it *indexBuildTask) SaveDiskAnnIndexFiles(ctx context.Context) error {
it.node.storeIndexFilesAndStatistic(it.ClusterID, it.BuildID, saveFileKeys, it.serializedSize, &it.statistic)
log.Ctx(ctx).Debug("save index files done", zap.Strings("IndexFiles", savePaths))
saveIndexFileDur := it.tr.Record("index file save done")
metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
metrics.IndexNodeSaveIndexFileLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(saveIndexFileDur.Milliseconds()))
it.tr.Elapse("index building all done")
log.Ctx(ctx).Info("IndexNode CreateIndex successfully ", zap.Int64("collect", it.collectionID),
zap.Int64("partition", it.partitionID), zap.Int64("segment", it.segmentID))
@ -512,7 +513,7 @@ func (it *indexBuildTask) decodeBlobs(ctx context.Context, blobs []*storage.Blob
return err2
}
decodeDuration := it.tr.RecordSpan().Milliseconds()
metrics.IndexNodeDecodeFieldLatency.WithLabelValues(strconv.FormatInt(Params.IndexNodeCfg.GetNodeID(), 10)).Observe(float64(decodeDuration))
metrics.IndexNodeDecodeFieldLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(decodeDuration))
if len(insertData.Data) != 1 {
return errors.New("we expect only one field in deserialized insert data")

View File

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"go.uber.org/zap"
)
@ -209,13 +210,13 @@ func createStream(factory msgstream.Factory, streamType streamType, pchans []pCh
func incPChansMetrics(pchans []pChan) {
for _, pc := range pchans {
metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), pc).Inc()
metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), pc).Inc()
}
}
func decPChanMetrics(pchans []pChan) {
for _, pc := range pchans {
metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), pc).Dec()
metrics.ProxyMsgStreamObjectsForPChan.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), pc).Dec()
}
}

View File

@ -22,6 +22,7 @@ import (
"testing"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus-proto/go-api/commonpb"
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
@ -237,6 +238,7 @@ func Test_createStream(t *testing.T) {
}
func Test_singleTypeChannelsMgr_createMsgStream(t *testing.T) {
paramtable.Init()
t.Run("re-create", func(t *testing.T) {
m := &singleTypeChannelsMgr{
infos: map[UniqueID]streamInfos{

File diff suppressed because it is too large

View File

@ -10,11 +10,13 @@ import (
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/stretchr/testify/assert"
)
func TestProxy_InvalidateCollectionMetaCache_remove_stream(t *testing.T) {
paramtable.Init()
cache := globalMetaCache
globalMetaCache = nil
defer func() { globalMetaCache = cache }()

View File

@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"go.uber.org/atomic"
"go.uber.org/zap"
@ -188,7 +189,7 @@ func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string)
collInfo, ok := m.collInfo[collectionName]
if !ok {
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GeCollectionID", metrics.CacheMissLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GeCollectionID", metrics.CacheMissLabel).Inc()
tr := timerecord.NewTimeRecorder("UpdateCache")
m.mu.RUnlock()
coll, err := m.describeCollection(ctx, collectionName)
@ -198,12 +199,12 @@ func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string)
m.mu.Lock()
defer m.mu.Unlock()
m.updateCollection(coll, collectionName)
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
collInfo = m.collInfo[collectionName]
return collInfo.collID, nil
}
defer m.mu.RUnlock()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionID", metrics.CacheHitLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionID", metrics.CacheHitLabel).Inc()
return collInfo.collID, nil
}
@ -218,7 +219,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
if !ok {
tr := timerecord.NewTimeRecorder("UpdateCache")
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheMissLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheMissLabel).Inc()
coll, err := m.describeCollection(ctx, collectionName)
if err != nil {
return nil, err
@ -227,7 +228,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
m.updateCollection(coll, collectionName)
collInfo = m.collInfo[collectionName]
m.mu.Unlock()
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
}
if !collInfo.isLoaded {
@ -235,7 +236,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
showResp, err := m.queryCoord.ShowCollections(ctx, &querypb.ShowCollectionsRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_ShowCollections),
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionIDs: []int64{collInfo.collID},
})
@ -264,7 +265,7 @@ func (m *MetaCache) GetCollectionInfo(ctx context.Context, collectionName string
}
}
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheHitLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionInfo", metrics.CacheHitLabel).Inc()
return collInfo, nil
}
@ -273,7 +274,7 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri
collInfo, ok := m.collInfo[collectionName]
if !ok {
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheMissLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheMissLabel).Inc()
tr := timerecord.NewTimeRecorder("UpdateCache")
m.mu.RUnlock()
coll, err := m.describeCollection(ctx, collectionName)
@ -287,14 +288,14 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri
defer m.mu.Unlock()
m.updateCollection(coll, collectionName)
collInfo = m.collInfo[collectionName]
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
log.Debug("Reload collection from root coordinator ",
zap.String("collection name ", collectionName),
zap.Any("time (milliseconds) take ", tr.ElapseSpan().Milliseconds()))
return collInfo.schema, nil
}
defer m.mu.RUnlock()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheHitLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetCollectionSchema", metrics.CacheHitLabel).Inc()
return collInfo.schema, nil
}
@ -334,7 +335,7 @@ func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (m
if collInfo.partInfo == nil || len(collInfo.partInfo) == 0 {
tr := timerecord.NewTimeRecorder("UpdateCache")
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitions", metrics.CacheMissLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetPartitions", metrics.CacheMissLabel).Inc()
m.mu.RUnlock()
partitions, err := m.showPartitions(ctx, collectionName)
@ -349,7 +350,7 @@ func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (m
if err != nil {
return nil, err
}
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
log.Debug("proxy", zap.Any("GetPartitions:partitions after update", partitions), zap.Any("collectionName", collectionName))
ret := make(map[string]typeutil.UniqueID)
partInfo := m.collInfo[collectionName].partInfo
@ -360,7 +361,7 @@ func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (m
}
defer m.mu.RUnlock()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitions", metrics.CacheHitLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetPartitions", metrics.CacheHitLabel).Inc()
ret := make(map[string]typeutil.UniqueID)
partInfo := m.collInfo[collectionName].partInfo
@ -391,7 +392,7 @@ func (m *MetaCache) GetPartitionInfo(ctx context.Context, collectionName string,
if !ok {
tr := timerecord.NewTimeRecorder("UpdateCache")
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitionInfo", metrics.CacheMissLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetPartitionInfo", metrics.CacheMissLabel).Inc()
partitions, err := m.showPartitions(ctx, collectionName)
if err != nil {
return nil, err
@ -403,14 +404,14 @@ func (m *MetaCache) GetPartitionInfo(ctx context.Context, collectionName string,
if err != nil {
return nil, err
}
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyUpdateCacheLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
log.Debug("proxy", zap.Any("GetPartitionID:partitions after update", partitions), zap.Any("collectionName", collectionName))
partInfo, ok = m.collInfo[collectionName].partInfo[partitionName]
if !ok {
return nil, ErrPartitionNotExist(partitionName)
}
}
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "GetPartitionInfo", metrics.CacheHitLabel).Inc()
metrics.ProxyCacheStatsCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "GetPartitionInfo", metrics.CacheHitLabel).Inc()
return &partitionInfo{
partitionID: partInfo.partitionID,
createdTimestamp: partInfo.createdTimestamp,
@ -615,7 +616,7 @@ func (m *MetaCache) GetShards(ctx context.Context, withCache bool, collectionNam
req := &querypb.GetShardLeadersRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_GetShardLeaders),
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionID: info.collID,
}
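The cache builds its outgoing requests with commonpbutil.NewMsgBase plus WithMsgType/WithSourceID options, where the source ID is now the shared paramtable node ID. A minimal functional-options sketch of that construction style; the real helpers operate on the protobuf MsgBase and differ in detail:

package main

import "fmt"

type msgBase struct {
	msgType  string
	sourceID int64
}

type option func(*msgBase)

func withMsgType(t string) option  { return func(b *msgBase) { b.msgType = t } }
func withSourceID(id int64) option { return func(b *msgBase) { b.sourceID = id } }

// newMsgBase applies each option to a fresh base, mirroring the
// commonpbutil.NewMsgBase(...) call shape used above.
func newMsgBase(opts ...option) *msgBase {
	b := &msgBase{}
	for _, o := range opts {
		o(b)
	}
	return b
}

func main() {
	base := newMsgBase(withMsgType("GetShardLeaders"), withSourceID(7))
	fmt.Printf("%+v\n", base)
}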

View File

@ -25,6 +25,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/hardware"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/ratelimitutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -80,7 +81,7 @@ func getProxyMetrics(ctx context.Context, request *milvuspb.GetMetricsRequest, n
}
quotaMetrics.Hms = hardwareMetrics
proxyRoleName := metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID())
proxyRoleName := metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID())
proxyMetricInfo := metricsinfo.ProxyInfos{
BaseComponentInfos: metricsinfo.BaseComponentInfos{
HasError: false,
@ -109,7 +110,7 @@ func getProxyMetrics(ctx context.Context, request *milvuspb.GetMetricsRequest, n
ErrorCode: commonpb.ErrorCode_Success,
},
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
}, nil
}
@ -126,7 +127,7 @@ func getSystemInfoMetrics(
identifierMap := make(map[string]int)
proxyRoleName := metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID())
proxyRoleName := metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID())
identifierMap[proxyRoleName] = int(node.session.ServerID)
proxyTopologyNode := metricsinfo.SystemTopologyNode{
@ -504,7 +505,7 @@ func getSystemInfoMetrics(
Reason: err.Error(),
},
Response: "",
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
}, nil
}
@ -514,6 +515,6 @@ func getSystemInfoMetrics(
Reason: "",
},
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, Params.ProxyCfg.GetNodeID()),
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
}, nil
}

View File

@ -25,6 +25,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/ratelimitutil"
)
@ -77,7 +78,7 @@ func (rl *rateLimiter) setRates(rates []*internalpb.Rate) error {
for _, r := range rates {
if _, ok := rl.limiters[r.GetRt()]; ok {
rl.limiters[r.GetRt()].SetLimit(ratelimitutil.Limit(r.GetR()))
metrics.SetRateGaugeByRateType(r.GetRt(), Params.ProxyCfg.GetNodeID(), r.GetR())
metrics.SetRateGaugeByRateType(r.GetRt(), paramtable.GetNodeID(), r.GetR())
} else {
return fmt.Errorf("unregister rateLimiter for rateType %s", r.GetRt().String())
}

View File

@ -26,7 +26,6 @@ import (
)
func TestMultiRateLimiter(t *testing.T) {
Params.Init()
t.Run("test multiRateLimiter", func(t *testing.T) {
bak := Params.QuotaConfig.QuotaAndLimitsEnabled
Params.QuotaConfig.QuotaAndLimitsEnabled = true
@ -79,7 +78,6 @@ func TestMultiRateLimiter(t *testing.T) {
}
func TestRateLimiter(t *testing.T) {
Params.Init()
t.Run("test limit", func(t *testing.T) {
limiter := newRateLimiter()
limiter.registerLimiters()

View File

@ -60,7 +60,7 @@ type Timestamp = typeutil.Timestamp
// make sure Proxy implements types.Proxy
var _ types.Proxy = (*Proxy)(nil)
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
// rateCol is global rateCollector in Proxy.
var rateCol *ratelimitutil.RateCollector
@ -78,6 +78,7 @@ type Proxy struct {
stateCode atomic.Value
etcdCli *clientv3.Client
address string
rootCoord types.RootCoord
indexCoord types.IndexCoord
dataCoord types.DataCoord
@ -152,8 +153,8 @@ func (node *Proxy) initSession() error {
if node.session == nil {
return errors.New("new session failed, maybe etcd cannot be connected")
}
node.session.Init(typeutil.ProxyRole, Params.ProxyCfg.NetworkAddress, false, true)
Params.ProxyCfg.SetNodeID(node.session.ServerID)
node.session.Init(typeutil.ProxyRole, node.address, false, true)
paramtable.SetNodeID(node.session.ServerID)
Params.SetLogger(node.session.ServerID)
return nil
}
@ -183,48 +184,48 @@ func (node *Proxy) Init() error {
}
log.Info("init session for Proxy done")
node.factory.Init(&Params)
node.factory.Init(Params)
log.Debug("init parameters for factory", zap.String("role", typeutil.ProxyRole), zap.Any("parameters", Params.ServiceParam))
err := node.initRateCollector()
if err != nil {
return err
}
log.Info("Proxy init rateCollector done", zap.Int64("nodeID", Params.ProxyCfg.GetNodeID()))
log.Info("Proxy init rateCollector done", zap.Int64("nodeID", paramtable.GetNodeID()))
log.Debug("create id allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
idAllocator, err := allocator.NewIDAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.GetNodeID())
log.Debug("create id allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
idAllocator, err := allocator.NewIDAllocator(node.ctx, node.rootCoord, paramtable.GetNodeID())
if err != nil {
log.Warn("failed to create id allocator",
zap.Error(err),
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
return err
}
node.rowIDAllocator = idAllocator
log.Debug("create id allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Debug("create id allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
log.Debug("create timestamp allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
tsoAllocator, err := newTimestampAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.GetNodeID())
log.Debug("create timestamp allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
tsoAllocator, err := newTimestampAllocator(node.ctx, node.rootCoord, paramtable.GetNodeID())
if err != nil {
log.Warn("failed to create timestamp allocator",
zap.Error(err),
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
return err
}
node.tsoAllocator = tsoAllocator
log.Debug("create timestamp allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Debug("create timestamp allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
log.Debug("create segment id assigner", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Debug("create segment id assigner", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
segAssigner, err := newSegIDAssigner(node.ctx, node.dataCoord, node.lastTick)
if err != nil {
log.Warn("failed to create segment id assigner",
zap.Error(err),
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
return err
}
node.segAssigner = segAssigner
node.segAssigner.PeerID = Params.ProxyCfg.GetNodeID()
log.Debug("create segment id assigner done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
node.segAssigner.PeerID = paramtable.GetNodeID()
log.Debug("create segment id assigner done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", paramtable.GetNodeID()))
log.Debug("create channels manager", zap.String("role", typeutil.ProxyRole))
dmlChannelsFunc := getDmlChannelsFunc(node.ctx, node.rootCoord)
@ -291,7 +292,7 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
maxTs := ts
for channel, ts := range stats {
physicalTs, _ := tsoutil.ParseHybridTs(ts)
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), channel).Set(float64(physicalTs))
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), channel).Set(float64(physicalTs))
channels = append(channels, channel)
tss = append(tss, ts)
if ts > maxTs {
@ -311,7 +312,7 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
DefaultTimestamp: maxTs,
}
maxPhysicalTs, _ := tsoutil.ParseHybridTs(maxTs)
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), "default").Set(float64(maxPhysicalTs))
metrics.ProxySyncTimeTick.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), "default").Set(float64(maxPhysicalTs))
status, err := node.rootCoord.UpdateChannelTimeTick(node.ctx, req)
if err != nil {
log.Warn("sendChannelsTimeTickLoop.UpdateChannelTimeTick", zap.Error(err))
@ -439,6 +440,10 @@ func (node *Proxy) AddCloseCallback(callbacks ...func()) {
node.closeCallbacks = append(node.closeCallbacks, callbacks...)
}
func (node *Proxy) SetAddress(address string) {
node.address = address
}
// SetEtcdClient sets etcd client for proxy.
func (node *Proxy) SetEtcdClient(client *clientv3.Client) {
node.etcdCli = client
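
For reference, a minimal before/after sketch of the access pattern the proxy hunks above apply; the identifiers are the ones visible in this diff, and the snippet is illustrative rather than part of the commit:

// before: each component read the node ID from its own copy of the params
nodeID := Params.ProxyCfg.GetNodeID()

// after: Params is a process-wide singleton, so the ID is read straight from
// the paramtable package
nodeID := paramtable.GetNodeID()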

@ -100,7 +100,6 @@ func runRootCoord(ctx context.Context, localMsg bool) *grpcrootcoord.Server {
wg.Add(1)
go func() {
rootcoord.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&rootcoord.Params.Log)
defer log.Sync()
@ -130,8 +129,6 @@ func runQueryCoord(ctx context.Context, localMsg bool) *grpcquerycoord.Server {
wg.Add(1)
go func() {
querycoord.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&querycoord.Params.Log)
defer log.Sync()
@ -161,9 +158,6 @@ func runQueryNode(ctx context.Context, localMsg bool, alias string) *grpcqueryno
wg.Add(1)
go func() {
querynode.Params.QueryNodeCfg.InitAlias(alias)
querynode.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&querynode.Params.Log)
defer log.Sync()
@ -193,8 +187,6 @@ func runDataCoord(ctx context.Context, localMsg bool) *grpcdatacoordclient.Serve
wg.Add(1)
go func() {
datacoord.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&datacoord.Params.Log)
defer log.Sync()
@ -220,9 +212,6 @@ func runDataNode(ctx context.Context, localMsg bool, alias string) *grpcdatanode
wg.Add(1)
go func() {
datanode.Params.DataNodeCfg.InitAlias(alias)
datanode.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&datanode.Params.Log)
defer log.Sync()
@ -252,8 +241,6 @@ func runIndexCoord(ctx context.Context, localMsg bool) *grpcindexcoord.Server {
wg.Add(1)
go func() {
indexcoord.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&indexcoord.Params.Log)
defer log.Sync()
@ -283,9 +270,6 @@ func runIndexNode(ctx context.Context, localMsg bool, alias string) *grpcindexno
wg.Add(1)
go func() {
indexnode.Params.IndexNodeCfg.InitAlias(alias)
indexnode.Params.InitOnce()
if !localMsg {
logutil.SetupLogger(&indexnode.Params.Log)
defer log.Sync()
@ -347,8 +331,7 @@ func (s *proxyTestServer) startGrpc(ctx context.Context, wg *sync.WaitGroup) {
var p paramtable.GrpcServerConfig
p.InitOnce(typeutil.ProxyRole)
Params.InitOnce()
Params.ProxyCfg.NetworkAddress = p.GetAddress()
s.Proxy.SetAddress(p.GetAddress())
var kaep = keepalive.EnforcementPolicy{
MinTime: 5 * time.Second, // If a client pings more than once every 5 seconds, terminate the connection
@ -416,6 +399,7 @@ func (s *proxyTestServer) gracefulStop() {
func TestProxy(t *testing.T) {
var err error
var wg sync.WaitGroup
paramtable.Init()
path := "/tmp/milvus/rocksmq" + funcutil.GenRandomStr()
t.Setenv("ROCKSMQ_PATH", path)
@ -427,7 +411,6 @@ func TestProxy(t *testing.T) {
factory := dependency.NewDefaultFactory(localMsg)
alias := "TestProxy"
Params.InitOnce()
log.Info("Initialize parameter table of Proxy")
rc := runRootCoord(ctx, localMsg)
@ -580,7 +563,7 @@ func TestProxy(t *testing.T) {
states, err := proxy.GetComponentStates(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, states.Status.ErrorCode)
assert.Equal(t, Params.ProxyCfg.GetNodeID(), states.State.NodeID)
assert.Equal(t, paramtable.GetNodeID(), states.State.NodeID)
assert.Equal(t, typeutil.ProxyRole, states.State.Role)
assert.Equal(t, proxy.stateCode.Load().(commonpb.StateCode), states.State.StateCode)
})
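
A sketch of the test bootstrap this change implies, assuming the standard testing package; the same TestMain pattern appears verbatim in a later hunk of this commit:

func TestMain(m *testing.M) {
	paramtable.Init() // initialize the shared param table once per test binary
	os.Exit(m.Run())
}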

@ -36,6 +36,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -143,13 +144,13 @@ func (cct *createCollectionTask) SetTs(ts Timestamp) {
func (cct *createCollectionTask) OnEnqueue() error {
cct.Base = commonpbutil.NewMsgBase()
cct.Base.MsgType = commonpb.MsgType_CreateCollection
cct.Base.SourceID = Params.ProxyCfg.GetNodeID()
cct.Base.SourceID = paramtable.GetNodeID()
return nil
}
func (cct *createCollectionTask) PreExecute(ctx context.Context) error {
cct.Base.MsgType = commonpb.MsgType_CreateCollection
cct.Base.SourceID = Params.ProxyCfg.GetNodeID()
cct.Base.SourceID = paramtable.GetNodeID()
cct.schema = &schemapb.CollectionSchema{}
err := proto.Unmarshal(cct.Schema, cct.schema)
@ -284,7 +285,7 @@ func (dct *dropCollectionTask) OnEnqueue() error {
func (dct *dropCollectionTask) PreExecute(ctx context.Context) error {
dct.Base.MsgType = commonpb.MsgType_DropCollection
dct.Base.SourceID = Params.ProxyCfg.GetNodeID()
dct.Base.SourceID = paramtable.GetNodeID()
if err := validateCollectionName(dct.CollectionName); err != nil {
return err
@ -354,7 +355,7 @@ func (hct *hasCollectionTask) OnEnqueue() error {
func (hct *hasCollectionTask) PreExecute(ctx context.Context) error {
hct.Base.MsgType = commonpb.MsgType_HasCollection
hct.Base.SourceID = Params.ProxyCfg.GetNodeID()
hct.Base.SourceID = paramtable.GetNodeID()
if err := validateCollectionName(hct.CollectionName); err != nil {
return err
@ -428,7 +429,7 @@ func (dct *describeCollectionTask) OnEnqueue() error {
func (dct *describeCollectionTask) PreExecute(ctx context.Context) error {
dct.Base.MsgType = commonpb.MsgType_DescribeCollection
dct.Base.SourceID = Params.ProxyCfg.GetNodeID()
dct.Base.SourceID = paramtable.GetNodeID()
if dct.CollectionID != 0 && len(dct.CollectionName) == 0 {
return nil
@ -546,7 +547,7 @@ func (sct *showCollectionsTask) OnEnqueue() error {
func (sct *showCollectionsTask) PreExecute(ctx context.Context) error {
sct.Base.MsgType = commonpb.MsgType_ShowCollections
sct.Base.SourceID = Params.ProxyCfg.GetNodeID()
sct.Base.SourceID = paramtable.GetNodeID()
if sct.GetType() == milvuspb.ShowType_InMemory {
for _, collectionName := range sct.CollectionNames {
if err := validateCollectionName(collectionName); err != nil {
@ -705,7 +706,7 @@ func (act *alterCollectionTask) OnEnqueue() error {
func (act *alterCollectionTask) PreExecute(ctx context.Context) error {
act.Base.MsgType = commonpb.MsgType_AlterCollection
act.Base.SourceID = Params.ProxyCfg.GetNodeID()
act.Base.SourceID = paramtable.GetNodeID()
return nil
}
@ -767,7 +768,7 @@ func (cpt *createPartitionTask) OnEnqueue() error {
func (cpt *createPartitionTask) PreExecute(ctx context.Context) error {
cpt.Base.MsgType = commonpb.MsgType_CreatePartition
cpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
cpt.Base.SourceID = paramtable.GetNodeID()
collName, partitionTag := cpt.CollectionName, cpt.PartitionName
@ -845,7 +846,7 @@ func (dpt *dropPartitionTask) OnEnqueue() error {
func (dpt *dropPartitionTask) PreExecute(ctx context.Context) error {
dpt.Base.MsgType = commonpb.MsgType_DropPartition
dpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
dpt.Base.SourceID = paramtable.GetNodeID()
collName, partitionTag := dpt.CollectionName, dpt.PartitionName
@ -948,7 +949,7 @@ func (hpt *hasPartitionTask) OnEnqueue() error {
func (hpt *hasPartitionTask) PreExecute(ctx context.Context) error {
hpt.Base.MsgType = commonpb.MsgType_HasPartition
hpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
hpt.Base.SourceID = paramtable.GetNodeID()
collName, partitionTag := hpt.CollectionName, hpt.PartitionName
@ -1025,7 +1026,7 @@ func (spt *showPartitionsTask) OnEnqueue() error {
func (spt *showPartitionsTask) PreExecute(ctx context.Context) error {
spt.Base.MsgType = commonpb.MsgType_ShowPartitions
spt.Base.SourceID = Params.ProxyCfg.GetNodeID()
spt.Base.SourceID = paramtable.GetNodeID()
if err := validateCollectionName(spt.CollectionName); err != nil {
return err
@ -1187,7 +1188,7 @@ func (ft *flushTask) OnEnqueue() error {
func (ft *flushTask) PreExecute(ctx context.Context) error {
ft.Base.MsgType = commonpb.MsgType_Flush
ft.Base.SourceID = Params.ProxyCfg.GetNodeID()
ft.Base.SourceID = paramtable.GetNodeID()
return nil
}
@ -1287,7 +1288,7 @@ func (lct *loadCollectionTask) OnEnqueue() error {
func (lct *loadCollectionTask) PreExecute(ctx context.Context) error {
log.Debug("loadCollectionTask PreExecute", zap.String("role", typeutil.ProxyRole), zap.Int64("msgID", lct.Base.MsgID))
lct.Base.MsgType = commonpb.MsgType_LoadCollection
lct.Base.SourceID = Params.ProxyCfg.GetNodeID()
lct.Base.SourceID = paramtable.GetNodeID()
collName := lct.CollectionName
@ -1419,7 +1420,7 @@ func (rct *releaseCollectionTask) OnEnqueue() error {
func (rct *releaseCollectionTask) PreExecute(ctx context.Context) error {
rct.Base.MsgType = commonpb.MsgType_ReleaseCollection
rct.Base.SourceID = Params.ProxyCfg.GetNodeID()
rct.Base.SourceID = paramtable.GetNodeID()
collName := rct.CollectionName
@ -1507,7 +1508,7 @@ func (lpt *loadPartitionsTask) OnEnqueue() error {
func (lpt *loadPartitionsTask) PreExecute(ctx context.Context) error {
lpt.Base.MsgType = commonpb.MsgType_LoadPartitions
lpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
lpt.Base.SourceID = paramtable.GetNodeID()
collName := lpt.CollectionName
@ -1632,7 +1633,7 @@ func (rpt *releasePartitionsTask) OnEnqueue() error {
func (rpt *releasePartitionsTask) PreExecute(ctx context.Context) error {
rpt.Base.MsgType = commonpb.MsgType_ReleasePartitions
rpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
rpt.Base.SourceID = paramtable.GetNodeID()
collName := rpt.CollectionName
@ -1733,7 +1734,7 @@ func (c *CreateAliasTask) OnEnqueue() error {
// PreExecute defines the action before task execution
func (c *CreateAliasTask) PreExecute(ctx context.Context) error {
c.Base.MsgType = commonpb.MsgType_CreateAlias
c.Base.SourceID = Params.ProxyCfg.GetNodeID()
c.Base.SourceID = paramtable.GetNodeID()
collAlias := c.Alias
// collection alias uses the same format as collection name
@ -1812,7 +1813,7 @@ func (d *DropAliasTask) OnEnqueue() error {
func (d *DropAliasTask) PreExecute(ctx context.Context) error {
d.Base.MsgType = commonpb.MsgType_DropAlias
d.Base.SourceID = Params.ProxyCfg.GetNodeID()
d.Base.SourceID = paramtable.GetNodeID()
collAlias := d.Alias
if err := ValidateCollectionAlias(collAlias); err != nil {
return err
@ -1878,7 +1879,7 @@ func (a *AlterAliasTask) OnEnqueue() error {
func (a *AlterAliasTask) PreExecute(ctx context.Context) error {
a.Base.MsgType = commonpb.MsgType_AlterAlias
a.Base.SourceID = Params.ProxyCfg.GetNodeID()
a.Base.SourceID = paramtable.GetNodeID()
collAlias := a.Alias
// collection alias uses the same format as collection name
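
All of the task hunks above follow the same shape; a hedged sketch with a hypothetical task type (someTask is not a real type in the codebase):

func (t *someTask) PreExecute(ctx context.Context) error {
	t.Base.MsgType = commonpb.MsgType_CreateCollection
	t.Base.SourceID = paramtable.GetNodeID() // previously Params.ProxyCfg.GetNodeID()
	return nil
}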

@ -35,6 +35,7 @@ import (
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/indexparamcheck"
"github.com/milvus-io/milvus/internal/util/indexparams"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -152,7 +153,7 @@ func (cit *createIndexTask) parseIndexParams() error {
return fmt.Errorf("IndexType not specified")
}
if indexType == indexparamcheck.IndexDISKANN {
err := indexparams.FillDiskIndexParams(&Params, indexParamsMap)
err := indexparams.FillDiskIndexParams(Params, indexParamsMap)
if err != nil {
return err
}
@ -264,7 +265,7 @@ func checkTrain(field *schemapb.FieldSchema, indexParams map[string]string) erro
func (cit *createIndexTask) PreExecute(ctx context.Context) error {
cit.req.Base.MsgType = commonpb.MsgType_CreateIndex
cit.req.Base.SourceID = Params.ProxyCfg.GetNodeID()
cit.req.Base.SourceID = paramtable.GetNodeID()
collName := cit.req.GetCollectionName()
@ -382,7 +383,7 @@ func (dit *describeIndexTask) OnEnqueue() error {
func (dit *describeIndexTask) PreExecute(ctx context.Context) error {
dit.Base.MsgType = commonpb.MsgType_DescribeIndex
dit.Base.SourceID = Params.ProxyCfg.GetNodeID()
dit.Base.SourceID = paramtable.GetNodeID()
if err := validateCollectionName(dit.CollectionName); err != nil {
return err
@ -496,7 +497,7 @@ func (dit *dropIndexTask) OnEnqueue() error {
func (dit *dropIndexTask) PreExecute(ctx context.Context) error {
dit.Base.MsgType = commonpb.MsgType_DropIndex
dit.Base.SourceID = Params.ProxyCfg.GetNodeID()
dit.Base.SourceID = paramtable.GetNodeID()
collName, fieldName := dit.CollectionName, dit.FieldName
@ -601,7 +602,7 @@ func (gibpt *getIndexBuildProgressTask) OnEnqueue() error {
func (gibpt *getIndexBuildProgressTask) PreExecute(ctx context.Context) error {
gibpt.Base.MsgType = commonpb.MsgType_GetIndexBuildProgress
gibpt.Base.SourceID = Params.ProxyCfg.GetNodeID()
gibpt.Base.SourceID = paramtable.GetNodeID()
if err := validateCollectionName(gibpt.CollectionName); err != nil {
return err
@ -694,7 +695,7 @@ func (gist *getIndexStateTask) OnEnqueue() error {
func (gist *getIndexStateTask) PreExecute(ctx context.Context) error {
gist.Base.MsgType = commonpb.MsgType_GetIndexState
gist.Base.SourceID = Params.ProxyCfg.GetNodeID()
gist.Base.SourceID = paramtable.GetNodeID()
if err := validateCollectionName(gist.CollectionName); err != nil {
return err

@ -19,6 +19,7 @@ package proxy
import (
"context"
"errors"
"os"
"testing"
"github.com/stretchr/testify/assert"
@ -29,9 +30,16 @@ import (
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func TestMain(m *testing.M) {
paramtable.Init()
code := m.Run()
os.Exit(code)
}
func TestGetIndexStateTask_Execute(t *testing.T) {
dbName := funcutil.GenRandomStr()
collectionName := funcutil.GenRandomStr()
@ -210,7 +218,6 @@ func TestCreateIndexTask_PreExecute(t *testing.T) {
collectionID := UniqueID(1)
fieldName := newTestSchema().Fields[0].Name
Params.Init()
ic := newMockIndexCoord()
ctx := context.Background()

@ -14,6 +14,7 @@ import (
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
@ -207,7 +208,7 @@ func (it *insertTask) PreExecute(ctx context.Context) error {
var rowIDEnd UniqueID
tr := timerecord.NewTimeRecorder("applyPK")
rowIDBegin, rowIDEnd, _ = it.idAllocator.Alloc(rowNums)
metrics.ProxyApplyPrimaryKeyLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan()))
metrics.ProxyApplyPrimaryKeyLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan()))
it.RowIDs = make([]UniqueID, rowNums)
for i := rowIDBegin; i < rowIDEnd; i++ {
@ -474,7 +475,7 @@ func (it *insertTask) Execute(ctx context.Context) error {
return err
}
sendMsgDur := tr.Record("send insert request to dml channel")
metrics.ProxySendMutationReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.InsertLabel).Observe(float64(sendMsgDur.Milliseconds()))
metrics.ProxySendMutationReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.InsertLabel).Observe(float64(sendMsgDur.Milliseconds()))
log.Debug("Proxy Insert Execute done", zap.Int64("msgID", it.Base.MsgID), zap.String("collection name", collectionName))

@ -59,7 +59,7 @@ func TestInsertTask_checkLengthOfFieldsData(t *testing.T) {
// Base: &commonpb.MsgBase{
// MsgType: commonpb.MsgType_Insert,
// MsgID: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// },
// }

@ -57,7 +57,6 @@ func TestUpdateShardsWithRoundRobin(t *testing.T) {
func TestGroupShardLeadersWithSameQueryNode(t *testing.T) {
var err error
Params.Init()
var (
ctx = context.TODO()
)
@ -121,7 +120,6 @@ func TestGroupShardLeadersWithSameQueryNode(t *testing.T) {
func TestMergeRoundRobinPolicy(t *testing.T) {
var err error
Params.Init()
var (
ctx = context.TODO()
)

@ -18,6 +18,7 @@ import (
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/grpcclient"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -164,7 +165,7 @@ func (t *queryTask) PreExecute(ctx context.Context) error {
}
t.Base.MsgType = commonpb.MsgType_Retrieve
t.Base.SourceID = Params.ProxyCfg.GetNodeID()
t.Base.SourceID = paramtable.GetNodeID()
collectionName := t.request.CollectionName
t.collectionName = collectionName
@ -346,13 +347,13 @@ func (t *queryTask) PostExecute(ctx context.Context) error {
}
}
metrics.ProxyDecodeResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.QueryLabel).Observe(0.0)
metrics.ProxyDecodeResultLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.QueryLabel).Observe(0.0)
tr.CtxRecord(ctx, "reduceResultStart")
t.result, err = reduceRetrieveResults(ctx, t.toReduceResults, t.queryParams)
if err != nil {
return err
}
metrics.ProxyReduceResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.QueryLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
metrics.ProxyReduceResultLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.QueryLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
t.result.CollectionName = t.collectionName
if len(t.result.FieldsData) > 0 {

@ -23,11 +23,11 @@ import (
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
func TestQueryTask_all(t *testing.T) {
Params.Init()
var (
err error
@ -100,7 +100,7 @@ func TestQueryTask_all(t *testing.T) {
status, err := qc.LoadCollection(ctx, &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionID: collectionID,
})
@ -113,7 +113,7 @@ func TestQueryTask_all(t *testing.T) {
RetrieveRequest: &internalpb.RetrieveRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Retrieve,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionID: collectionID,
OutputFieldsId: make([]int64, len(fieldName2Types)),
@ -127,7 +127,7 @@ func TestQueryTask_all(t *testing.T) {
request: &milvuspb.QueryRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Retrieve,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
Expr: expr,

@ -26,7 +26,6 @@ import (
)
func TestBaseTaskQueue(t *testing.T) {
Params.Init()
var err error
var unissuedTask task
@ -104,7 +103,6 @@ func TestBaseTaskQueue(t *testing.T) {
}
func TestDdTaskQueue(t *testing.T) {
Params.Init()
var err error
var unissuedTask task
@ -183,7 +181,6 @@ func TestDdTaskQueue(t *testing.T) {
// test the logic of queue
func TestDmTaskQueue_Basic(t *testing.T) {
Params.Init()
var err error
var unissuedTask task
@ -262,7 +259,6 @@ func TestDmTaskQueue_Basic(t *testing.T) {
// test the timestamp statistics
func TestDmTaskQueue_TimestampStatistics(t *testing.T) {
Params.Init()
var err error
var unissuedTask task
@ -301,7 +297,6 @@ func TestDmTaskQueue_TimestampStatistics(t *testing.T) {
}
func TestDqTaskQueue(t *testing.T) {
Params.Init()
var err error
var unissuedTask task
@ -379,7 +374,6 @@ func TestDqTaskQueue(t *testing.T) {
}
func TestTaskScheduler(t *testing.T) {
Params.Init()
var err error

@ -22,6 +22,7 @@ import (
"github.com/milvus-io/milvus/internal/util/distance"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/grpcclient"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/tsoutil"
@ -265,7 +266,7 @@ func (t *searchTask) PreExecute(ctx context.Context) error {
}
t.Base.MsgType = commonpb.MsgType_Search
t.Base.SourceID = Params.ProxyCfg.GetNodeID()
t.Base.SourceID = paramtable.GetNodeID()
collectionName := t.request.CollectionName
t.collectionName = collectionName
@ -443,7 +444,7 @@ func (t *searchTask) PostExecute(ctx context.Context) error {
if err != nil {
return err
}
metrics.ProxyDecodeResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
metrics.ProxyDecodeResultLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
if len(validSearchResults) <= 0 {
@ -466,7 +467,7 @@ func (t *searchTask) PostExecute(ctx context.Context) error {
return err
}
metrics.ProxyReduceResultLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
metrics.ProxyReduceResultLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.SearchLabel).Observe(float64(tr.RecordSpan().Milliseconds()))
t.result.CollectionName = t.collectionName
t.fillInFieldInfo()
@ -563,7 +564,7 @@ func checkIfLoaded(ctx context.Context, qc types.QueryCoord, collectionName stri
resp, err := qc.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionID: info.collID,
PartitionIDs: searchPartitionIDs,
@ -844,6 +845,6 @@ func (t *searchTask) SetTs(ts Timestamp) {
func (t *searchTask) OnEnqueue() error {
t.Base = commonpbutil.NewMsgBase()
t.Base.MsgType = commonpb.MsgType_Search
t.Base.SourceID = Params.ProxyCfg.GetNodeID()
t.Base.SourceID = paramtable.GetNodeID()
return nil
}
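
The ShowPartitions call above also shows the functional-option form of building a MsgBase; a small sketch of that construction, using only the helpers visible in this diff:

base := commonpbutil.NewMsgBase(
	commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
	commonpbutil.WithSourceID(paramtable.GetNodeID()),
)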

@ -13,6 +13,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/util/autoindex"
"github.com/milvus-io/milvus/internal/util/indexparamcheck"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -46,7 +47,7 @@ func TestSearchTask_PostExecute(t *testing.T) {
SearchRequest: &internalpb.SearchRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Search,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
},
request: nil,
@ -115,7 +116,6 @@ func getValidSearchParams() []*commonpb.KeyValuePair {
func TestSearchTask_PreExecute(t *testing.T) {
var err error
Params.InitOnce()
var (
rc = NewRootCoordMock()
qc = NewQueryCoordMock()
@ -290,7 +290,6 @@ func TestSearchTask_PreExecute(t *testing.T) {
}
func TestSearchTaskV2_Execute(t *testing.T) {
Params.InitOnce()
var (
err error
@ -352,7 +351,6 @@ func genSearchResultData(nq int64, topk int64, ids []int64, scores []float32) *s
}
func TestSearchTask_Ts(t *testing.T) {
Params.InitOnce()
task := &searchTask{
SearchRequest: &internalpb.SearchRequest{},
@ -404,7 +402,6 @@ func TestSearchTask_Reduce(t *testing.T) {
func TestSearchTaskWithInvalidRoundDecimal(t *testing.T) {
// var err error
//
// Params.Init()
// Params.ProxyCfg.SearchResultChannelNames = []string{funcutil.GenRandomStr()}
//
// rc := NewRootCoordMock()
@ -476,7 +473,7 @@ func TestSearchTaskWithInvalidRoundDecimal(t *testing.T) {
// MsgType: commonpb.MsgType_LoadCollection,
// MsgID: 0,
// Timestamp: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// DbID: 0,
// CollectionID: collectionID,
@ -497,9 +494,9 @@ func TestSearchTaskWithInvalidRoundDecimal(t *testing.T) {
// MsgType: commonpb.MsgType_Search,
// MsgID: 0,
// Timestamp: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
// ResultChannelID: strconv.FormatInt(paramtable.GetNodeID(), 10),
// DbID: 0,
// CollectionID: 0,
// PartitionIDs: nil,
@ -647,7 +644,6 @@ func TestSearchTaskWithInvalidRoundDecimal(t *testing.T) {
func TestSearchTaskV2_all(t *testing.T) {
// var err error
//
// Params.Init()
// Params.ProxyCfg.SearchResultChannelNames = []string{funcutil.GenRandomStr()}
//
// rc := NewRootCoordMock()
@ -720,7 +716,7 @@ func TestSearchTaskV2_all(t *testing.T) {
// MsgType: commonpb.MsgType_LoadCollection,
// MsgID: 0,
// Timestamp: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// DbID: 0,
// CollectionID: collectionID,
@ -741,9 +737,9 @@ func TestSearchTaskV2_all(t *testing.T) {
// MsgType: commonpb.MsgType_Search,
// MsgID: 0,
// Timestamp: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
// ResultChannelID: strconv.FormatInt(paramtable.GetNodeID(), 10),
// DbID: 0,
// CollectionID: 0,
// PartitionIDs: nil,
@ -892,7 +888,6 @@ func TestSearchTaskV2_all(t *testing.T) {
func TestSearchTaskV2_7803_reduce(t *testing.T) {
// var err error
//
// Params.Init()
// Params.ProxyCfg.SearchResultChannelNames = []string{funcutil.GenRandomStr()}
//
// rc := NewRootCoordMock()
@ -958,7 +953,7 @@ func TestSearchTaskV2_7803_reduce(t *testing.T) {
// MsgType: commonpb.MsgType_LoadCollection,
// MsgID: 0,
// Timestamp: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// DbID: 0,
// CollectionID: collectionID,
@ -979,9 +974,9 @@ func TestSearchTaskV2_7803_reduce(t *testing.T) {
// MsgType: commonpb.MsgType_Search,
// MsgID: 0,
// Timestamp: 0,
// SourceID: Params.ProxyCfg.GetNodeID(),
// SourceID: paramtable.GetNodeID(),
// },
// ResultChannelID: strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10),
// ResultChannelID: strconv.FormatInt(paramtable.GetNodeID(), 10),
// DbID: 0,
// CollectionID: 0,
// PartitionIDs: nil,
@ -1645,7 +1640,6 @@ func Test_checkIfLoaded(t *testing.T) {
}
func TestSearchTask_ErrExecute(t *testing.T) {
Params.Init()
var (
err error
@ -1714,7 +1708,7 @@ func TestSearchTask_ErrExecute(t *testing.T) {
status, err := qc.LoadCollection(ctx, &querypb.LoadCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_LoadCollection,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionID: collectionID,
})
@ -1727,7 +1721,7 @@ func TestSearchTask_ErrExecute(t *testing.T) {
SearchRequest: &internalpb.SearchRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Retrieve,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionID: collectionID,
OutputFieldsId: make([]int64, len(fieldName2Types)),
@ -1741,7 +1735,7 @@ func TestSearchTask_ErrExecute(t *testing.T) {
request: &milvuspb.SearchRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Retrieve,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
Nq: 2,

@ -17,6 +17,7 @@ import (
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/grpcclient"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/tsoutil"
@ -110,7 +111,7 @@ func (g *getStatisticsTask) PreExecute(ctx context.Context) error {
// TODO: Maybe we should create a new MsgType: GetStatistics?
g.Base.MsgType = commonpb.MsgType_GetPartitionStatistics
g.Base.SourceID = Params.ProxyCfg.GetNodeID()
g.Base.SourceID = paramtable.GetNodeID()
collID, err := globalMetaCache.GetCollectionID(ctx, g.collectionName)
if err != nil { // err is not nil if collection not exists
@ -327,7 +328,7 @@ func checkFullLoaded(ctx context.Context, qc types.QueryCoord, collectionName st
resp, err := qc.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionID: info.collID,
PartitionIDs: searchPartitionIDs,
@ -352,7 +353,7 @@ func checkFullLoaded(ctx context.Context, qc types.QueryCoord, collectionName st
resp, err := qc.ShowPartitions(ctx, &querypb.ShowPartitionsRequest{
Base: commonpbutil.NewMsgBase(
commonpbutil.WithMsgType(commonpb.MsgType_ShowPartitions),
commonpbutil.WithSourceID(Params.ProxyCfg.GetNodeID()),
commonpbutil.WithSourceID(paramtable.GetNodeID()),
),
CollectionID: info.collID,
})
@ -633,7 +634,7 @@ func (g *getCollectionStatisticsTask) OnEnqueue() error {
func (g *getCollectionStatisticsTask) PreExecute(ctx context.Context) error {
g.Base.MsgType = commonpb.MsgType_GetCollectionStatistics
g.Base.SourceID = Params.ProxyCfg.GetNodeID()
g.Base.SourceID = paramtable.GetNodeID()
return nil
}
@ -721,7 +722,7 @@ func (g *getPartitionStatisticsTask) OnEnqueue() error {
func (g *getPartitionStatisticsTask) PreExecute(ctx context.Context) error {
g.Base.MsgType = commonpb.MsgType_GetPartitionStatistics
g.Base.SourceID = Params.ProxyCfg.GetNodeID()
g.Base.SourceID = paramtable.GetNodeID()
return nil
}

@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/internal/allocator"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/golang/protobuf/proto"
@ -474,7 +475,6 @@ func TestTranslateOutputFields(t *testing.T) {
}
func TestCreateCollectionTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
@ -755,7 +755,6 @@ func TestCreateCollectionTask(t *testing.T) {
}
func TestHasCollectionTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -812,7 +811,7 @@ func TestHasCollectionTask(t *testing.T) {
assert.Equal(t, UniqueID(100), task.ID())
assert.Equal(t, Timestamp(100), task.BeginTs())
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
// missing collectionID in globalMetaCache
err = task.Execute(ctx)
assert.Nil(t, err)
@ -841,7 +840,6 @@ func TestHasCollectionTask(t *testing.T) {
}
func TestDescribeCollectionTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -877,7 +875,7 @@ func TestDescribeCollectionTask(t *testing.T) {
assert.Equal(t, UniqueID(100), task.ID())
assert.Equal(t, Timestamp(100), task.BeginTs())
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
// missing collectionID in globalMetaCache
err := task.Execute(ctx)
assert.Nil(t, err)
@ -904,7 +902,6 @@ func TestDescribeCollectionTask(t *testing.T) {
}
func TestDescribeCollectionTask_ShardsNum1(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -969,7 +966,6 @@ func TestDescribeCollectionTask_ShardsNum1(t *testing.T) {
}
func TestDescribeCollectionTask_ShardsNum2(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1036,7 +1032,6 @@ func TestDescribeCollectionTask_ShardsNum2(t *testing.T) {
}
func TestCreatePartitionTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1068,7 +1063,7 @@ func TestCreatePartitionTask(t *testing.T) {
assert.Equal(t, UniqueID(100), task.ID())
assert.Equal(t, Timestamp(100), task.BeginTs())
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
@ -1083,7 +1078,6 @@ func TestCreatePartitionTask(t *testing.T) {
}
func TestDropPartitionTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1133,7 +1127,7 @@ func TestDropPartitionTask(t *testing.T) {
assert.Equal(t, UniqueID(100), task.ID())
assert.Equal(t, Timestamp(100), task.BeginTs())
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
@ -1192,7 +1186,6 @@ func TestDropPartitionTask(t *testing.T) {
}
func TestHasPartitionTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1224,7 +1217,7 @@ func TestHasPartitionTask(t *testing.T) {
assert.Equal(t, UniqueID(100), task.ID())
assert.Equal(t, Timestamp(100), task.BeginTs())
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
@ -1239,7 +1232,6 @@ func TestHasPartitionTask(t *testing.T) {
}
func TestShowPartitionsTask(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1272,7 +1264,7 @@ func TestShowPartitionsTask(t *testing.T) {
assert.Equal(t, UniqueID(100), task.ID())
assert.Equal(t, Timestamp(100), task.BeginTs())
assert.Equal(t, Timestamp(100), task.EndTs())
assert.Equal(t, Params.ProxyCfg.GetNodeID(), task.GetBase().GetSourceID())
assert.Equal(t, paramtable.GetNodeID(), task.GetBase().GetSourceID())
err := task.Execute(ctx)
assert.NotNil(t, err)
@ -1296,8 +1288,6 @@ func TestShowPartitionsTask(t *testing.T) {
func TestTask_Int64PrimaryKey(t *testing.T) {
var err error
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1359,7 +1349,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
MsgType: commonpb.MsgType_CreatePartition,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
DbName: dbName,
CollectionName: collectionName,
@ -1387,14 +1377,13 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
_ = ticker.start()
defer ticker.close()
idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.GetNodeID())
idAllocator, err := allocator.NewIDAllocator(ctx, rc, paramtable.GetNodeID())
assert.NoError(t, err)
_ = idAllocator.Start()
defer idAllocator.Close()
segAllocator, err := newSegIDAssigner(ctx, &mockDataCoord{expireTime: Timestamp(2500)}, getLastTick1)
assert.NoError(t, err)
segAllocator.Init()
_ = segAllocator.Start()
defer segAllocator.Close()
@ -1409,7 +1398,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Insert,
MsgID: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
DbName: dbName,
CollectionName: collectionName,
@ -1464,7 +1453,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
MsgType: commonpb.MsgType_Delete,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
PartitionName: partitionName,
@ -1518,7 +1507,7 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
MsgType: commonpb.MsgType_Delete,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
PartitionName: partitionName,
@ -1550,8 +1539,6 @@ func TestTask_Int64PrimaryKey(t *testing.T) {
func TestTask_VarCharPrimaryKey(t *testing.T) {
var err error
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1614,7 +1601,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
MsgType: commonpb.MsgType_CreatePartition,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
DbName: dbName,
CollectionName: collectionName,
@ -1642,7 +1629,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
_ = ticker.start()
defer ticker.close()
idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.GetNodeID())
idAllocator, err := allocator.NewIDAllocator(ctx, rc, paramtable.GetNodeID())
assert.NoError(t, err)
_ = idAllocator.Start()
defer idAllocator.Close()
@ -1664,7 +1651,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_Insert,
MsgID: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
DbName: dbName,
CollectionName: collectionName,
@ -1721,7 +1708,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
MsgType: commonpb.MsgType_Delete,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
PartitionName: partitionName,
@ -1775,7 +1762,7 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
MsgType: commonpb.MsgType_Delete,
MsgID: 0,
Timestamp: 0,
SourceID: Params.ProxyCfg.GetNodeID(),
SourceID: paramtable.GetNodeID(),
},
CollectionName: collectionName,
PartitionName: partitionName,
@ -1805,7 +1792,6 @@ func TestTask_VarCharPrimaryKey(t *testing.T) {
}
func TestCreateAlias_all(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1847,7 +1833,6 @@ func TestCreateAlias_all(t *testing.T) {
}
func TestDropAlias_all(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -1886,7 +1871,6 @@ func TestDropAlias_all(t *testing.T) {
}
func TestAlterAlias_all(t *testing.T) {
Params.InitOnce()
rc := NewRootCoordMock()
rc.Start()
defer rc.Stop()
@ -2125,7 +2109,6 @@ func Test_checkTrain(t *testing.T) {
func Test_createIndexTask_PreExecute(t *testing.T) {
collectionName := "test"
fieldName := "test"
Params.Init()
cit := &createIndexTask{
req: &milvuspb.CreateIndexRequest{
@ -2231,7 +2214,6 @@ func Test_createIndexTask_PreExecute(t *testing.T) {
}
func Test_dropCollectionTask_PreExecute(t *testing.T) {
Params.InitOnce()
dct := &dropCollectionTask{DropCollectionRequest: &milvuspb.DropCollectionRequest{
Base: &commonpb.MsgBase{},
CollectionName: "0xffff", // invalid

@ -17,6 +17,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/planpb"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/trace"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -154,7 +155,7 @@ func getPrimaryKeysFromExpr(schema *schemapb.CollectionSchema, expr string) (res
func (dt *deleteTask) PreExecute(ctx context.Context) error {
dt.Base.MsgType = commonpb.MsgType_Delete
dt.Base.SourceID = Params.ProxyCfg.GetNodeID()
dt.Base.SourceID = paramtable.GetNodeID()
dt.result = &milvuspb.MutationResult{
Status: &commonpb.Status{
@ -313,7 +314,7 @@ func (dt *deleteTask) Execute(ctx context.Context) (err error) {
return err
}
sendMsgDur := tr.Record("send delete request to dml channels")
metrics.ProxySendMutationReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), metrics.DeleteLabel).Observe(float64(sendMsgDur.Milliseconds()))
metrics.ProxySendMutationReqLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.DeleteLabel).Observe(float64(sendMsgDur.Milliseconds()))
return nil
}

@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/util/commonpbutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
)
@ -62,7 +63,7 @@ func (ta *timestampAllocator) alloc(count uint32) ([]Timestamp, error) {
resp, err := ta.tso.AllocTimestamp(ctx, req)
defer func() {
cancel()
metrics.ProxyApplyTimestampLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyApplyTimestampLatency.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10)).Observe(float64(tr.ElapseSpan().Milliseconds()))
}()
if err != nil {

@ -608,13 +608,11 @@ func TestValidateUsername(t *testing.T) {
res = ValidateUsername("a1^7*).,")
assert.Error(t, res)
// normal username that only contains alphabet, _, and number
Params.InitOnce()
res = ValidateUsername("a17_good")
assert.Nil(t, res)
}
func TestValidatePassword(t *testing.T) {
Params.InitOnce()
// only spaces
res := ValidatePassword("")
assert.NotNil(t, res)
@ -636,7 +634,6 @@ func TestReplaceID2Name(t *testing.T) {
}
func TestValidateName(t *testing.T) {
Params.InitOnce()
nameType := "Test"
validNames := []string{
"abc",
@ -779,7 +776,6 @@ func TestPasswordVerify(t *testing.T) {
}
func TestValidateTravelTimestamp(t *testing.T) {
Params.Init()
originalRetentionDuration := Params.CommonCfg.RetentionDuration
defer func() {
Params.CommonCfg.RetentionDuration = originalRetentionDuration

@ -36,6 +36,7 @@ import (
"github.com/milvus-io/milvus/internal/querycoordv2/utils"
"github.com/milvus-io/milvus/internal/util/hardware"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/util/uniquegenerator"
)
@ -153,7 +154,7 @@ func (s *Server) getSystemInfoMetrics(
clusterTopology := metricsinfo.QueryClusterTopology{
Self: metricsinfo.QueryCoordInfos{
BaseComponentInfos: metricsinfo.BaseComponentInfos{
Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, paramtable.GetNodeID()),
HardwareInfos: metricsinfo.HardwareMetrics{
IP: s.session.Address,
CPUCoreCount: hardware.GetCPUNum(),
@ -183,7 +184,7 @@ func (s *Server) getSystemInfoMetrics(
coordTopology := metricsinfo.QueryCoordTopology{
Cluster: clusterTopology,
Connections: metricsinfo.ConnTopology{
Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, Params.QueryCoordCfg.GetNodeID()),
Name: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole, paramtable.GetNodeID()),
// TODO(dragondriver): fill ConnectedComponents if necessary
ConnectedComponents: []metricsinfo.ConnectionInfo{},
},

@ -52,7 +52,7 @@ type MockQueryNode struct {
segmentVersion map[int64]int64
}
func NewMockQueryNode(t *testing.T, etcdCli *clientv3.Client) *MockQueryNode {
func NewMockQueryNode(t *testing.T, etcdCli *clientv3.Client, nodeID int64) *MockQueryNode {
ctx, cancel := context.WithCancel(context.Background())
node := &MockQueryNode{
MockQueryNodeServer: NewMockQueryNodeServer(t),
@ -61,6 +61,7 @@ func NewMockQueryNode(t *testing.T, etcdCli *clientv3.Client) *MockQueryNode {
session: sessionutil.NewSession(ctx, Params.EtcdCfg.MetaRootPath, etcdCli),
channels: make(map[int64][]string),
segments: make(map[int64]map[string][]int64),
ID: nodeID,
}
return node
@ -111,9 +112,9 @@ func (node *MockQueryNode) Start() error {
node.segmentVersion[segment.GetSegmentID()] = req.GetVersion()
}).Return(successStatus, nil).Maybe()
// Regiser
// Register
node.session.Init(typeutil.QueryNodeRole, node.addr, false, true)
node.ID = node.session.ServerID
node.session.ServerID = node.ID
node.session.Register()
log.Debug("mock QueryNode started",
zap.Int64("nodeID", node.ID),

@ -26,7 +26,7 @@ import (
"github.com/milvus-io/milvus/internal/util/paramtable"
)
var Params paramtable.ComponentParam
var Params *paramtable.ComponentParam = paramtable.Get()
var (
ErrFailedAllocateID = errors.New("failed to allocate ID")
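
The Params line above is the re-export pattern this commit applies across packages: the package-level Params is no longer a value but a pointer to the shared instance, so existing Params.XxxCfg call sites keep compiling while every package sees the same table. Sketch:

// one shared instance, obtained from the paramtable package
var Params *paramtable.ComponentParam = paramtable.Get()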

@ -53,6 +53,7 @@ import (
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -60,7 +61,7 @@ import (
var (
// Only for re-export
Params = &params.Params
Params = params.Params
)
type Server struct {
@ -69,6 +70,7 @@ type Server struct {
wg sync.WaitGroup
status atomic.Value
etcdCli *clientv3.Client
address string
session *sessionutil.Session
kv kv.MetaKv
idAllocator func() (int64, error)
@ -147,17 +149,17 @@ func (s *Server) Register() error {
func (s *Server) Init() error {
log.Info("QueryCoord start init",
zap.String("meta-root-path", Params.EtcdCfg.MetaRootPath),
zap.String("address", Params.QueryCoordCfg.Address))
zap.String("address", s.address))
// Init QueryCoord session
s.session = sessionutil.NewSession(s.ctx, Params.EtcdCfg.MetaRootPath, s.etcdCli)
if s.session == nil {
return fmt.Errorf("failed to create session")
}
s.session.Init(typeutil.QueryCoordRole, Params.QueryCoordCfg.Address, true, true)
s.session.Init(typeutil.QueryCoordRole, s.address, true, true)
s.enableActiveStandBy = Params.QueryCoordCfg.EnableActiveStandby
s.session.SetEnableActiveStandBy(s.enableActiveStandBy)
Params.QueryCoordCfg.SetNodeID(s.session.ServerID)
paramtable.SetNodeID(s.session.ServerID)
Params.SetLogger(s.session.ServerID)
s.factory.Init(Params)
@ -443,6 +445,10 @@ func (s *Server) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringRespon
}, nil
}
func (s *Server) SetAddress(address string) {
s.address = address
}
// SetEtcdClient sets etcd's client
func (s *Server) SetEtcdClient(etcdClient *clientv3.Client) {
s.etcdCli = etcdClient
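
A condensed sketch of how the server ID now reaches the shared param table during Init, using only the calls shown in the hunk above:

s.session.Init(typeutil.QueryCoordRole, s.address, true, true)
paramtable.SetNodeID(s.session.ServerID) // replaces Params.QueryCoordCfg.SetNodeID
Params.SetLogger(s.session.ServerID)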

@ -103,7 +103,7 @@ func (suite *ServerSuite) SetupTest() {
suite.NoError(err)
for i := range suite.nodes {
suite.nodes[i] = mocks.NewMockQueryNode(suite.T(), suite.server.etcdCli)
suite.nodes[i] = mocks.NewMockQueryNode(suite.T(), suite.server.etcdCli, int64(i))
err := suite.nodes[i].Start()
suite.Require().NoError(err)
ok := suite.waitNodeUp(suite.nodes[i], 5*time.Second)
@ -143,7 +143,7 @@ func (suite *ServerSuite) TestRecover() {
}
func (suite *ServerSuite) TestNodeUp() {
newNode := mocks.NewMockQueryNode(suite.T(), suite.server.etcdCli)
newNode := mocks.NewMockQueryNode(suite.T(), suite.server.etcdCli, 100)
newNode.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{}, nil)
err := newNode.Start()
suite.NoError(err)

@ -23,6 +23,7 @@ import (
"sync"
"github.com/milvus-io/milvus/internal/util/errorutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"golang.org/x/sync/errgroup"
@ -561,7 +562,7 @@ func (s *Server) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest
resp := &milvuspb.GetMetricsResponse{
Status: successStatus,
ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryCoordRole,
Params.QueryCoordCfg.GetNodeID()),
paramtable.GetNodeID()),
}
metricType, err := metricsinfo.ParseMetricType(req.GetRequest())

@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/querycoordv2/mocks"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"google.golang.org/grpc"
@ -42,6 +43,7 @@ type ClusterTestSuite struct {
}
func (suite *ClusterTestSuite) SetupSuite() {
paramtable.Init()
suite.setupServers()
}

@ -30,6 +30,7 @@ import (
"sync/atomic"
"unsafe"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/metrics"
@ -123,7 +124,7 @@ OUTER:
c.vChannels = append(c.vChannels, dstChan)
}
metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Add(float64(len(c.vChannels)))
metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Add(float64(len(c.vChannels)))
}
// getVChannels get virtual channels of collection
@ -151,7 +152,7 @@ func (c *Collection) removeVChannel(channel Channel) {
zap.String("channel", channel),
)
metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(len(c.vChannels)))
metrics.QueryNodeNumDmlChannels.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Sub(float64(len(c.vChannels)))
}
// addPChannels add physical channels to physical channels of collection
@ -283,7 +284,7 @@ OUTER:
c.vDeltaChannels = append(c.vDeltaChannels, dstChan)
}
metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Add(float64(len(c.vDeltaChannels)))
metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Add(float64(len(c.vDeltaChannels)))
}
func (c *Collection) removeVDeltaChannel(channel Channel) {
@ -301,7 +302,7 @@ func (c *Collection) removeVDeltaChannel(channel Channel) {
zap.String("channel", channel),
)
metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(len(c.vDeltaChannels)))
metrics.QueryNodeNumDeltaChannels.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Sub(float64(len(c.vDeltaChannels)))
}
func (c *Collection) AddVDeltaChannels(toLoadChannels []Channel, VPChannels map[string]string) []Channel {

@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
)
// dataSyncService manages a lot of flow graphs
@ -88,7 +89,7 @@ func (dsService *dataSyncService) addFlowGraphsForDMLChannels(collectionID Uniqu
log.Info("add DML flow graph",
zap.Any("collectionID", collectionID),
zap.Any("channel", channel))
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
}
return results, nil
@ -133,7 +134,7 @@ func (dsService *dataSyncService) addFlowGraphsForDeltaChannels(collectionID Uni
log.Info("add delta flow graph",
zap.Any("collectionID", collectionID),
zap.Any("channel", channel))
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
}
return results, nil
@ -206,7 +207,7 @@ func (dsService *dataSyncService) removeFlowGraphsByDMLChannels(channels []Chann
if _, ok := dsService.dmlChannel2FlowGraph[channel]; ok {
// close flow graph
dsService.dmlChannel2FlowGraph[channel].close()
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Dec()
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Dec()
}
delete(dsService.dmlChannel2FlowGraph, channel)
rateCol.removeTSafeChannel(channel)
@ -222,7 +223,7 @@ func (dsService *dataSyncService) removeFlowGraphsByDeltaChannels(channels []Cha
if _, ok := dsService.deltaChannel2FlowGraph[channel]; ok {
// close flow graph
dsService.deltaChannel2FlowGraph[channel].close()
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Dec()
metrics.QueryNodeNumFlowGraphs.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Dec()
}
delete(dsService.deltaChannel2FlowGraph, channel)
rateCol.removeTSafeChannel(channel)

@ -31,6 +31,7 @@ import (
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/util/flowgraph"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/trace"
)
@ -101,7 +102,7 @@ func (fdmNode *filterDmNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
if resMsg != nil {
iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
rateCol.Add(metricsinfo.InsertConsumeThroughput, float64(proto.Size(&resMsg.InsertRequest)))
metrics.QueryNodeConsumeCounter.WithLabelValues(strconv.FormatInt(Params.QueryNodeCfg.GetNodeID(), 10), metrics.InsertLabel).Add(float64(proto.Size(&resMsg.InsertRequest)))
metrics.QueryNodeConsumeCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.InsertLabel).Add(float64(proto.Size(&resMsg.InsertRequest)))
}
case commonpb.MsgType_Delete:
resMsg, err := fdmNode.filterInvalidDeleteMessage(msg.(*msgstream.DeleteMsg), collection.getLoadType())
@ -114,7 +115,7 @@ func (fdmNode *filterDmNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
if resMsg != nil {
iMsg.deleteMessages = append(iMsg.deleteMessages, resMsg)
rateCol.Add(metricsinfo.DeleteConsumeThroughput, float64(proto.Size(&resMsg.DeleteRequest)))
metrics.QueryNodeConsumeCounter.WithLabelValues(strconv.FormatInt(Params.QueryNodeCfg.GetNodeID(), 10), metrics.DeleteLabel).Add(float64(proto.Size(&resMsg.DeleteRequest)))
metrics.QueryNodeConsumeCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.DeleteLabel).Add(float64(proto.Size(&resMsg.DeleteRequest)))
}
default:
log.Warn("invalid message type in filterDmNode",

@ -30,6 +30,7 @@ import (
"github.com/milvus-io/milvus/internal/mq/msgstream/mqwrapper"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/flowgraph"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/tsoutil"
)
@ -213,7 +214,7 @@ func (q *queryNodeFlowGraph) consumeFlowGraph(channel Channel, subName ConsumeSu
zap.String("subName", subName),
)
q.consumerCnt++
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
return nil
}
@ -230,7 +231,7 @@ func (q *queryNodeFlowGraph) consumeFlowGraphFromLatest(channel Channel, subName
zap.String("subName", subName),
)
q.consumerCnt++
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
return nil
}
@ -251,7 +252,7 @@ func (q *queryNodeFlowGraph) consumeFlowGraphFromPosition(position *internalpb.M
zap.Duration("elapse", time.Since(start)),
)
q.consumerCnt++
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Inc()
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
return err
}
@ -260,7 +261,7 @@ func (q *queryNodeFlowGraph) close() {
q.cancel()
q.flowGraph.Close()
if q.dmlStream != nil && q.consumerCnt > 0 {
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Sub(float64(q.consumerCnt))
metrics.QueryNodeNumConsumers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Sub(float64(q.consumerCnt))
}
log.Info("stop query node flow graph",
zap.Int64("collectionID", q.collectionID),

@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/milvus-io/milvus/internal/util/timerecord"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
@ -59,7 +60,7 @@ func (node *QueryNode) GetComponentStates(ctx context.Context) (*milvuspb.Compon
}
nodeID := common.NotRegisteredID
if node.session != nil && node.session.Registered() {
nodeID = node.session.ServerID
nodeID = paramtable.GetNodeID()
}
info := &milvuspb.ComponentInfo{
NodeID: nodeID,
@ -157,7 +158,7 @@ func (node *QueryNode) getStatisticsWithDmlChannel(ctx context.Context, req *que
}
if !node.isHealthy() {
failRet.Status.Reason = msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())
failRet.Status.Reason = msgQueryNodeIsUnhealthy(paramtable.GetNodeID())
return failRet, nil
}
@ -284,7 +285,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *querypb.WatchDmC
// check node healthy
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -293,10 +294,10 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *querypb.WatchDmC
}
// check target matches
if in.GetBase().GetTargetID() != node.session.ServerID {
if in.GetBase().GetTargetID() != paramtable.GetNodeID() {
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_NodeIDNotMatch,
Reason: common.WrapNodeIDNotMatchMsg(in.GetBase().GetTargetID(), node.session.ServerID),
Reason: common.WrapNodeIDNotMatchMsg(in.GetBase().GetTargetID(), paramtable.GetNodeID()),
}
return status, nil
}
@ -313,7 +314,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *querypb.WatchDmC
startTs := time.Now()
log.Info("watchDmChannels init", zap.Int64("collectionID", in.CollectionID),
zap.String("channelName", in.Infos[0].GetChannelName()),
zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
zap.Int64("nodeID", paramtable.GetNodeID()))
// currently we only support loading one channel at a time
node.taskLock.RLock(strconv.FormatInt(in.Infos[0].CollectionID, 10))
defer node.taskLock.RUnlock(strconv.FormatInt(in.Infos[0].CollectionID, 10))
@ -354,7 +355,7 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *querypb.WatchDmC
sc, _ := node.ShardClusterService.getShardCluster(in.Infos[0].GetChannelName())
sc.SetupFirstVersion()
log.Info("successfully watchDmChannelsTask", zap.Int64("collectionID", in.CollectionID),
zap.String("channelName", in.Infos[0].GetChannelName()), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
zap.String("channelName", in.Infos[0].GetChannelName()), zap.Int64("nodeID", paramtable.GetNodeID()))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
@ -367,7 +368,7 @@ func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmC
// check node healthy
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -376,10 +377,10 @@ func (node *QueryNode) UnsubDmChannel(ctx context.Context, req *querypb.UnsubDmC
}
// check target matches
if req.GetBase().GetTargetID() != node.session.ServerID {
if req.GetBase().GetTargetID() != paramtable.GetNodeID() {
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_NodeIDNotMatch,
Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), node.session.ServerID),
Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), paramtable.GetNodeID()),
}
return status, nil
}
@ -430,7 +431,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegment
// check node healthy
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -438,10 +439,10 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegment
return status, nil
}
// check target matches
if in.GetBase().GetTargetID() != node.session.ServerID {
if in.GetBase().GetTargetID() != paramtable.GetNodeID() {
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_NodeIDNotMatch,
Reason: common.WrapNodeIDNotMatchMsg(in.GetBase().GetTargetID(), node.session.ServerID),
Reason: common.WrapNodeIDNotMatchMsg(in.GetBase().GetTargetID(), paramtable.GetNodeID()),
}
return status, nil
}
@ -470,7 +471,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegment
startTs := time.Now()
log.Info("loadSegmentsTask init", zap.Int64("collectionID", in.CollectionID),
zap.Int64s("segmentIDs", segmentIDs),
zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
zap.Int64("nodeID", paramtable.GetNodeID()))
node.taskLock.RLock(strconv.FormatInt(in.CollectionID, 10))
for _, segmentID := range segmentIDs {
@ -499,7 +500,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegment
return status, nil
}
log.Info("loadSegmentsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
log.Info("loadSegmentsTask Enqueue done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", paramtable.GetNodeID()))
waitFunc := func() (*commonpb.Status, error) {
err = task.WaitToFinish()
@ -511,7 +512,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegment
log.Warn(err.Error())
return status, nil
}
log.Info("loadSegmentsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", Params.QueryNodeCfg.GetNodeID()))
log.Info("loadSegmentsTask WaitToFinish done", zap.Int64("collectionID", in.CollectionID), zap.Int64s("segmentIDs", segmentIDs), zap.Int64("nodeID", paramtable.GetNodeID()))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
@ -524,7 +525,7 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *querypb.LoadSegment
func (node *QueryNode) ReleaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -572,7 +573,7 @@ func (node *QueryNode) ReleaseCollection(ctx context.Context, in *querypb.Releas
func (node *QueryNode) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -621,7 +622,7 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, in *querypb.ReleaseS
// check node healthy
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -629,10 +630,10 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, in *querypb.ReleaseS
return status, nil
}
// check target matches
if in.GetBase().GetTargetID() != node.session.ServerID {
if in.GetBase().GetTargetID() != paramtable.GetNodeID() {
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_NodeIDNotMatch,
Reason: common.WrapNodeIDNotMatchMsg(in.GetBase().GetTargetID(), node.session.ServerID),
Reason: common.WrapNodeIDNotMatchMsg(in.GetBase().GetTargetID(), paramtable.GetNodeID()),
}
return status, nil
}
@ -683,7 +684,7 @@ func (node *QueryNode) ReleaseSegments(ctx context.Context, in *querypb.ReleaseS
func (node *QueryNode) GetSegmentInfo(ctx context.Context, in *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
res := &querypb.GetSegmentInfoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
@ -789,13 +790,13 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (
if !req.FromShardLeader {
rateCol.Add(metricsinfo.NQPerSecond, float64(req.GetReq().GetNq()))
rateCol.Add(metricsinfo.SearchThroughput, float64(proto.Size(req)))
metrics.QueryNodeExecuteCounter.WithLabelValues(strconv.FormatInt(Params.QueryNodeCfg.GetNodeID(), 10), metrics.SearchLabel).Add(float64(proto.Size(req)))
metrics.QueryNodeExecuteCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.SearchLabel).Add(float64(proto.Size(req)))
}
return ret, nil
}
func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.SearchRequest, dmlChannel string) (*internalpb.SearchResults, error) {
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.TotalLabel).Inc()
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel, metrics.TotalLabel).Inc()
failRet := &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
@ -804,11 +805,11 @@ func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.Se
defer func() {
if failRet.Status.ErrorCode != commonpb.ErrorCode_Success {
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.FailLabel).Inc()
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel, metrics.FailLabel).Inc()
}
}()
if !node.isHealthy() {
failRet.Status.Reason = msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())
failRet.Status.Reason = msgQueryNodeIsUnhealthy(paramtable.GetNodeID())
return failRet, nil
}
@ -868,13 +869,13 @@ func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.Se
msgID, req.GetFromShardLeader(), dmlChannel, req.GetSegmentIDs()))
failRet.Status.ErrorCode = commonpb.ErrorCode_Success
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.SearchLabel).Observe(float64(historicalTask.queueDur.Milliseconds()))
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.SearchLabel).Observe(float64(historicalTask.reduceDur.Milliseconds()))
latency := tr.ElapseSpan()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.SuccessLabel).Inc()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel, metrics.SuccessLabel).Inc()
return historicalTask.Ret, nil
}
@ -908,9 +909,9 @@ func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.Se
if err != nil {
return err
}
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.SearchLabel).Observe(float64(streamingTask.queueDur.Milliseconds()))
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.SearchLabel).Observe(float64(streamingTask.reduceDur.Milliseconds()))
streamingResult = streamingTask.Ret
return nil
@ -939,16 +940,16 @@ func (node *QueryNode) searchWithDmlChannel(ctx context.Context, req *querypb.Se
failRet.Status.ErrorCode = commonpb.ErrorCode_Success
latency := tr.ElapseSpan()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.SuccessLabel).Inc()
metrics.QueryNodeSearchNQ.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Observe(float64(req.Req.GetNq()))
metrics.QueryNodeSearchTopK.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID())).Observe(float64(req.Req.GetTopk()))
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel, metrics.SuccessLabel).Inc()
metrics.QueryNodeSearchNQ.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Observe(float64(req.Req.GetNq()))
metrics.QueryNodeSearchTopK.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Observe(float64(req.Req.GetTopk()))
return ret, nil
}
func (node *QueryNode) queryWithDmlChannel(ctx context.Context, req *querypb.QueryRequest, dmlChannel string) (*internalpb.RetrieveResults, error) {
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel, metrics.TotalLabel).Inc()
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel, metrics.TotalLabel).Inc()
failRet := &internalpb.RetrieveResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
@ -957,11 +958,11 @@ func (node *QueryNode) queryWithDmlChannel(ctx context.Context, req *querypb.Que
defer func() {
if failRet.Status.ErrorCode != commonpb.ErrorCode_Success {
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.SearchLabel, metrics.FailLabel).Inc()
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel, metrics.FailLabel).Inc()
}
}()
if !node.isHealthy() {
failRet.Status.Reason = msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())
failRet.Status.Reason = msgQueryNodeIsUnhealthy(paramtable.GetNodeID())
return failRet, nil
}
@ -1014,13 +1015,13 @@ func (node *QueryNode) queryWithDmlChannel(ctx context.Context, req *querypb.Que
msgID, req.GetFromShardLeader(), dmlChannel, req.GetSegmentIDs()))
failRet.Status.ErrorCode = commonpb.ErrorCode_Success
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.QueryLabel).Observe(float64(queryTask.queueDur.Milliseconds()))
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.QueryLabel).Observe(float64(queryTask.reduceDur.Milliseconds()))
latency := tr.ElapseSpan()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel, metrics.SuccessLabel).Inc()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel, metrics.SuccessLabel).Inc()
return queryTask.Ret, nil
}
@ -1051,9 +1052,9 @@ func (node *QueryNode) queryWithDmlChannel(ctx context.Context, req *querypb.Que
if err != nil {
return err
}
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeSQLatencyInQueue.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.QueryLabel).Observe(float64(streamingTask.queueDur.Milliseconds()))
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()),
metrics.QueryNodeReduceLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
metrics.QueryLabel).Observe(float64(streamingTask.reduceDur.Milliseconds()))
streamingResult = streamingTask.Ret
return nil
@ -1083,8 +1084,8 @@ func (node *QueryNode) queryWithDmlChannel(ctx context.Context, req *querypb.Que
failRet.Status.ErrorCode = commonpb.ErrorCode_Success
latency := tr.ElapseSpan()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(Params.QueryNodeCfg.GetNodeID()), metrics.QueryLabel, metrics.SuccessLabel).Inc()
metrics.QueryNodeSQReqLatency.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel).Observe(float64(latency.Milliseconds()))
metrics.QueryNodeSQCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.QueryLabel, metrics.SuccessLabel).Inc()
return ret, nil
}
@ -1145,7 +1146,7 @@ func (node *QueryNode) Query(ctx context.Context, req *querypb.QueryRequest) (*i
if !req.FromShardLeader {
rateCol.Add(metricsinfo.NQPerSecond, 1)
metrics.QueryNodeExecuteCounter.WithLabelValues(strconv.FormatInt(Params.QueryNodeCfg.GetNodeID(), 10), metrics.QueryLabel).Add(float64(proto.Size(req)))
metrics.QueryNodeExecuteCounter.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), metrics.QueryLabel).Add(float64(proto.Size(req)))
}
return ret, nil
}
@ -1155,7 +1156,7 @@ func (node *QueryNode) SyncReplicaSegments(ctx context.Context, req *querypb.Syn
if !node.isHealthy() {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
Reason: msgQueryNodeIsUnhealthy(paramtable.GetNodeID()),
}, nil
}
@ -1179,14 +1180,14 @@ func (node *QueryNode) SyncReplicaSegments(ctx context.Context, req *querypb.Syn
func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
if !node.isHealthy() {
log.Warn("QueryNode.ShowConfigurations failed",
zap.Int64("nodeId", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Pattern),
zap.Error(errQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())))
zap.Error(errQueryNodeIsUnhealthy(paramtable.GetNodeID())))
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
Reason: msgQueryNodeIsUnhealthy(paramtable.GetNodeID()),
},
Configuations: nil,
}, nil
@ -1199,14 +1200,14 @@ func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.S
func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
if !node.isHealthy() {
log.Warn("QueryNode.GetMetrics failed",
zap.Int64("nodeId", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(errQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())))
zap.Error(errQueryNodeIsUnhealthy(paramtable.GetNodeID())))
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
Reason: msgQueryNodeIsUnhealthy(paramtable.GetNodeID()),
},
Response: "",
}, nil
@ -1215,7 +1216,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
metricType, err := metricsinfo.ParseMetricType(req.Request)
if err != nil {
log.Warn("QueryNode.GetMetrics failed to parse metric type",
zap.Int64("nodeId", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.Error(err))
@ -1231,7 +1232,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
queryNodeMetrics, err := getSystemInfoMetrics(ctx, req, node)
if err != nil {
log.Warn("QueryNode.GetMetrics failed",
zap.Int64("nodeId", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metricType", metricType),
zap.Error(err))
@ -1243,7 +1244,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
}, nil
}
log.Debug("QueryNode.GetMetrics",
zap.Int64("node_id", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("node_id", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metric_type", metricType),
zap.Any("queryNodeMetrics", queryNodeMetrics))
@ -1252,7 +1253,7 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
}
log.Debug("QueryNode.GetMetrics failed, request metric type is not implemented yet",
zap.Int64("nodeId", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("nodeId", paramtable.GetNodeID()),
zap.String("req", req.Request),
zap.String("metricType", metricType))
@ -1268,25 +1269,25 @@ func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsR
func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.GetDataDistributionRequest) (*querypb.GetDataDistributionResponse, error) {
log := log.With(
zap.Int64("msg-id", req.GetBase().GetMsgID()),
zap.Int64("node-id", Params.QueryNodeCfg.GetNodeID()),
zap.Int64("node-id", paramtable.GetNodeID()),
)
if !node.isHealthy() {
log.Warn("QueryNode.GetMetrics failed",
zap.Error(errQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID())))
zap.Error(errQueryNodeIsUnhealthy(paramtable.GetNodeID())))
return &querypb.GetDataDistributionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: msgQueryNodeIsUnhealthy(Params.QueryNodeCfg.GetNodeID()),
Reason: msgQueryNodeIsUnhealthy(paramtable.GetNodeID()),
},
}, nil
}
// check target matches
if req.GetBase().GetTargetID() != node.session.ServerID {
if req.GetBase().GetTargetID() != paramtable.GetNodeID() {
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_NodeIDNotMatch,
Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), node.session.ServerID),
Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), paramtable.GetNodeID()),
}
return &querypb.GetDataDistributionResponse{Status: status}, nil
}
@ -1344,7 +1345,7 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
return &querypb.GetDataDistributionResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
NodeID: node.session.ServerID,
NodeID: paramtable.GetNodeID(),
Segments: segmentVersionInfos,
Channels: channelVersionInfos,
LeaderViews: leaderViews,
@ -1356,7 +1357,7 @@ func (node *QueryNode) SyncDistribution(ctx context.Context, req *querypb.SyncDi
// check node healthy
code := node.stateCode.Load().(commonpb.StateCode)
if code != commonpb.StateCode_Healthy {
err := fmt.Errorf("query node %d is not ready", Params.QueryNodeCfg.GetNodeID())
err := fmt.Errorf("query node %d is not ready", paramtable.GetNodeID())
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: err.Error(),
@ -1364,11 +1365,11 @@ func (node *QueryNode) SyncDistribution(ctx context.Context, req *querypb.SyncDi
return status, nil
}
// check target matches
if req.GetBase().GetTargetID() != node.session.ServerID {
if req.GetBase().GetTargetID() != paramtable.GetNodeID() {
log.Warn("failed to do match target id when sync ", zap.Int64("expect", req.GetBase().GetTargetID()), zap.Int64("actual", node.session.ServerID))
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_NodeIDNotMatch,
Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), node.session.ServerID),
Reason: common.WrapNodeIDNotMatchMsg(req.GetBase().GetTargetID(), paramtable.GetNodeID()),
}
return status, nil
}

View File

@ -27,12 +27,13 @@ import "C"
import (
"encoding/json"
"fmt"
"path/filepath"
"unsafe"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/indexparams"
"go.uber.org/zap"
"path/filepath"
"unsafe"
"github.com/milvus-io/milvus-proto/go-api/schemapb"
"github.com/milvus-io/milvus/internal/proto/querypb"

View File

@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus/internal/log"
queryPb "github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/paramtable"
"github.com/samber/lo"
)
@ -172,7 +173,7 @@ func (l *loadSegmentsTask) watchDeltaChannel(vchanName []string) error {
log.Warn("watchDeltaChannel, add flowGraph for deltaChannel failed", zap.Int64("collectionID", collectionID), zap.Strings("vDeltaChannels", vDeltaChannels), zap.Error(err))
return err
}
consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, Params.QueryNodeCfg.GetNodeID())
consumeSubName := funcutil.GenChannelSubName(Params.CommonCfg.QueryNodeSubName, collectionID, paramtable.GetNodeID())
// channels as consumer
for channel, fg := range channel2FlowGraph {

Some files were not shown because too many files have changed in this diff
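
A note for readers of the hunks above: the recurring change is that call sites stop reading the node ID from a component-scoped Params value (Params.QueryNodeCfg.GetNodeID()) or from node.session.ServerID, and instead call paramtable.GetNodeID() on the process-wide parameter table. The sketch below is a minimal, self-contained illustration of that singleton-accessor pattern; the package layout and names (ComponentParam, Get, SetNodeID, GetNodeID) are assumptions for illustration only, not the actual Milvus paramtable implementation.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// ComponentParam stands in for the shared configuration table.
type ComponentParam struct {
	nodeID atomic.Int64
}

var (
	instance *ComponentParam
	once     sync.Once
)

// Get returns the process-wide ComponentParam, initializing it exactly once.
func Get() *ComponentParam {
	once.Do(func() { instance = &ComponentParam{} })
	return instance
}

// SetNodeID records the server ID assigned when the session registers.
func SetNodeID(id int64) {
	Get().nodeID.Store(id)
}

// GetNodeID is the accessor call sites use instead of holding their own Params.
func GetNodeID() int64 {
	return Get().nodeID.Load()
}

func main() {
	SetNodeID(7)
	fmt.Println("query node ID:", GetNodeID())
}

With an accessor like this, every goroutine in the process observes the same node ID once it has been set at registration time, which is presumably why the diff can replace node.session.ServerID with paramtable.GetNodeID() in the target-ID checks.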