Mirror of https://gitee.com/milvus-io/milvus.git, synced 2026-01-07 19:31:51 +08:00
Signed-off-by: yun.zhang <yun.zhang@zilliz.com>
Co-authored-by: Xiaofan <83447078+xiaofan-luan@users.noreply.github.com>
This commit is contained in:
parent e7429f88af
commit 47a1bdf2df
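The hunks below follow a single pattern: zap-style log calls on routine startup/shutdown paths are raised from log.Debug to log.Info, and failure paths are raised to log.Warn or log.Error. A minimal, hypothetical sketch of that call pattern is shown here using go.uber.org/zap directly; Milvus actually routes these calls through its internal log package wrapper, whose configuration is assumed.

package main

import (
    "errors"

    "go.uber.org/zap"
)

func main() {
    // A plain production zap logger stands in for the Milvus log wrapper.
    logger, _ := zap.NewProduction()
    defer logger.Sync()

    // Routine progress messages operators should see by default: Info instead of Debug.
    logger.Info("Grpc connect", zap.String("Address", "localhost:19530"))

    // Failure paths are reported at Warn (or Error) so they stay visible at production log levels.
    err := errors.New("connection refused") // hypothetical error for illustration
    logger.Warn("DataCoord connect to etcd failed", zap.Error(err))
}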
@@ -203,7 +203,7 @@ func (m *Manager) getConfigValueBySource(configKey, sourceName string) (string,
 func (m *Manager) updateEvent(e *Event) error {
 // refresh all configuration one by one
-log.Debug("receive update event", zap.Any("event", e))
+log.Info("receive update event", zap.Any("event", e))
 if e.HasUpdated {
 return nil
 }

@@ -164,7 +164,7 @@ func (bm *DelBufferManager) ShouldFlushSegments() []UniqueID {
 segMem := heap.Pop(bm.delBufHeap).(*Item)
 poppedSegMem = append(poppedSegMem, segMem)
 shouldFlushSegments = append(shouldFlushSegments, segMem.segmentID)
-log.Debug("add segment for delete buf flush", zap.Int64("segmentID", segMem.segmentID))
+log.Info("add segment for delete buf flush", zap.Int64("segmentID", segMem.segmentID))
 mmUsage -= segMem.memorySize
 if mmUsage < Params.DataNodeCfg.FlushDeleteBufferBytes {
 break
@@ -106,12 +106,12 @@ func (cm *ConnectionManager) AddDependency(roleName string) error {
 msess, rev, err := cm.session.GetSessions(roleName)
 if err != nil {
-log.Debug("ClientManager GetSessions failed", zap.Any("roleName", roleName))
+log.Warn("ClientManager GetSessions failed", zap.Any("roleName", roleName))
 return err
 }
 if len(msess) == 0 {
-log.Debug("No nodes are currently alive", zap.Any("roleName", roleName))
+log.Warn("No nodes are currently alive", zap.Any("roleName", roleName))
 } else {
 for _, value := range msess {
 cm.buildConnections(value)

@@ -245,7 +245,7 @@ func (cm *ConnectionManager) processEvent(channel <-chan *sessionutil.SessionEve
 }
 switch ev.EventType {
 case sessionutil.SessionAddEvent:
-log.Debug("ConnectionManager", zap.Any("add event", ev.Session))
+log.Info("ConnectionManager", zap.Any("add event", ev.Session))
 cm.buildConnections(ev.Session)
 case sessionutil.SessionDelEvent:
 cm.removeTask(ev.Session.ServerID)
@@ -265,12 +265,11 @@ func (cm *ConnectionManager) receiveFinishTask() {
 case serverID := <-cm.notify:
 cm.taskMu.Lock()
 task, ok := cm.buildTasks[serverID]
-log.Debug("ConnectionManager", zap.Any("receive finish", serverID))
+log.Info("ConnectionManager", zap.Any("receive finish", serverID))
 if ok {
-log.Debug("ConnectionManager", zap.Any("get task ok", serverID))
-log.Debug("ConnectionManager", zap.Any("task state", task.state))
+log.Info("ConnectionManager", zap.Any("get task ok", serverID))
 if task.state == buildClientSuccess {
-log.Debug("ConnectionManager", zap.Any("build success", serverID))
+log.Info("ConnectionManager", zap.Any("build success", serverID))
 cm.addConnection(task.sess.ServerID, task.result)
 cm.buildClients(task.sess, task.result)
 }
@@ -393,7 +392,7 @@ func (bct *buildClientTask) Run() {
 defer bct.finish()
 connectGrpcFunc := func() error {
 opts := trace.GetInterceptorOpts()
-log.Debug("Grpc connect ", zap.String("Address", bct.sess.Address))
+log.Info("Grpc connect ", zap.String("Address", bct.sess.Address))
 conn, err := grpc.DialContext(bct.ctx, bct.sess.Address,
 grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(30*time.Second),
 grpc.WithDisableRetry(),

@@ -423,9 +422,9 @@ func (bct *buildClientTask) Run() {
 }
 err := retry.Do(bct.ctx, connectGrpcFunc, bct.retryOptions...)
-log.Debug("ConnectionManager", zap.Any("build connection finish", bct.sess.ServerID))
+log.Info("ConnectionManager", zap.Any("build connection finish", bct.sess.ServerID))
 if err != nil {
-log.Debug("BuildClientTask try connect failed",
+log.Warn("BuildClientTask try connect failed",
 zap.Any("roleName", bct.sess.ServerName), zap.Error(err))
 bct.state = buildClientFailed
 return
@@ -437,7 +436,7 @@ func (bct *buildClientTask) Stop() {
 }

 func (bct *buildClientTask) finish() {
-log.Debug("ConnectionManager", zap.Any("notify connection finish", bct.sess.ServerID))
+log.Info("ConnectionManager", zap.Any("notify connection finish", bct.sess.ServerID))
 bct.notify <- bct.sess.ServerID
 }
@@ -56,7 +56,7 @@ func NewClient(ctx context.Context, metaRoot string, etcdCli *clientv3.Client) (
 sess := sessionutil.NewSession(ctx, metaRoot, etcdCli)
 if sess == nil {
 err := fmt.Errorf("new session error, maybe can not connect to etcd")
-log.Debug("DataCoordClient NewClient failed", zap.Error(err))
+log.Warn("DataCoordClient NewClient failed", zap.Error(err))
 return nil, err
 }
 ClientParams.InitOnce(typeutil.DataCoordRole)

@@ -90,12 +90,12 @@ func (c *Client) getDataCoordAddr() (string, error) {
 key := c.grpcClient.GetRole()
 msess, _, err := c.sess.GetSessions(key)
 if err != nil {
-log.Debug("DataCoordClient, getSessions failed", zap.Any("key", key), zap.Error(err))
+log.Warn("DataCoordClient, getSessions failed", zap.Any("key", key), zap.Error(err))
 return "", err
 }
 ms, ok := msess[key]
 if !ok {
-log.Debug("DataCoordClient, not existed in msess ", zap.Any("key", key), zap.Any("len of msess", len(msess)))
+log.Warn("DataCoordClient, not existed in msess ", zap.Any("key", key), zap.Any("len of msess", len(msess)))
 return "", fmt.Errorf("find no available datacoord, check datacoord state")
 }
 return ms.Address, nil
@@ -94,7 +94,7 @@ func (s *Server) init() error {
 etcdCli, err := etcd.GetEtcdClient(&datacoord.Params.EtcdCfg)
 if err != nil {
-log.Debug("DataCoord connect to etcd failed", zap.Error(err))
+log.Warn("DataCoord connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli

@@ -102,26 +102,26 @@ func (s *Server) init() error {
 if s.indexCoord == nil {
 var err error
-log.Debug("create IndexCoord client for DataCoord")
+log.Info("create IndexCoord client for DataCoord")
 s.indexCoord, err = icc.NewClient(s.ctx, Params.EtcdCfg.MetaRootPath, etcdCli)
 if err != nil {
 log.Warn("failed to create IndexCoord client for DataCoord", zap.Error(err))
 return err
 }
-log.Debug("create IndexCoord client for DataCoord done")
+log.Info("create IndexCoord client for DataCoord done")
 }
-log.Debug("init IndexCoord client for DataCoord")
+log.Info("init IndexCoord client for DataCoord")
 if err := s.indexCoord.Init(); err != nil {
 log.Warn("failed to init IndexCoord client for DataCoord", zap.Error(err))
 return err
 }
-log.Debug("init IndexCoord client for DataCoord done")
+log.Info("init IndexCoord client for DataCoord done")
 s.dataCoord.SetIndexCoord(s.indexCoord)
 err = s.startGrpc()
 if err != nil {
-log.Debug("DataCoord startGrpc failed", zap.Error(err))
+log.Warn("DataCoord startGrpc failed", zap.Error(err))
 return err
 }
@@ -144,7 +144,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 defer logutil.LogPanic()
 defer s.wg.Done()
-log.Debug("network port", zap.Int("port", grpcPort))
+log.Info("network port", zap.Int("port", grpcPort))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
 if err != nil {
 log.Error("grpc server failed to listen error", zap.Error(err))

@@ -192,7 +192,7 @@ func (s *Server) start() error {
 }
 err = s.dataCoord.Register()
 if err != nil {
-log.Debug("DataCoord register service failed", zap.Error(err))
+log.Warn("DataCoord register service failed", zap.Error(err))
 return err
 }
 return nil

@@ -201,7 +201,7 @@ func (s *Server) start() error {
 // Stop stops the DataCoord server gracefully.
 // Need to call the GracefulStop interface of grpc server and call the stop method of the inner DataCoord object.
 func (s *Server) Stop() error {
-log.Debug("Datacoord stop", zap.String("Address", Params.GetAddress()))
+log.Info("Datacoord stop", zap.String("Address", Params.GetAddress()))
 var err error
 if s.closer != nil {
 if err = s.closer.Close(); err != nil {

@@ -214,7 +214,7 @@ func (s *Server) Stop() error {
 defer s.etcdCli.Close()
 }
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 s.grpcServer.GracefulStop()
 }
@@ -233,12 +233,12 @@ func (s *Server) Run() error {
 if err := s.init(); err != nil {
 return err
 }
-log.Debug("DataCoord init done ...")
+log.Info("DataCoord init done ...")
 if err := s.start(); err != nil {
 return err
 }
-log.Debug("DataCoord start done ...")
+log.Info("DataCoord start done ...")
 return nil
 }
@@ -184,7 +184,7 @@ func (s *Server) Run() error {
 // Stop stops Datanode's grpc service.
 func (s *Server) Stop() error {
-log.Debug("Datanode stop", zap.String("Address", Params.GetAddress()))
+log.Info("Datanode stop", zap.String("Address", Params.GetAddress()))
 if s.closer != nil {
 if err := s.closer.Close(); err != nil {
 return err

@@ -195,7 +195,7 @@ func (s *Server) Stop() error {
 defer s.etcdCli.Close()
 }
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 // make graceful stop has a timeout
 stopped := make(chan struct{})
 go func() {

@@ -277,7 +277,7 @@ func (s *Server) init() error {
 // --- DataCoord Client ---
 if s.newDataCoordClient != nil {
-log.Debug("starting DataCoord client for DataNode")
+log.Info("starting DataCoord client for DataNode")
 dataCoordClient, err := s.newDataCoordClient(dn.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err != nil {
 log.Error("failed to create new DataCoord client", zap.Error(err))

@@ -318,7 +318,7 @@ func (s *Server) start() error {
 }
 err := s.datanode.Register()
 if err != nil {
-log.Debug("failed to register to Etcd", zap.Error(err))
+log.Warn("failed to register to Etcd", zap.Error(err))
 return err
 }
 return nil
@@ -52,7 +52,7 @@ func NewClient(ctx context.Context, metaRoot string, etcdCli *clientv3.Client) (
 sess := sessionutil.NewSession(ctx, metaRoot, etcdCli)
 if sess == nil {
 err := fmt.Errorf("new session error, maybe can not connect to etcd")
-log.Debug("IndexCoordClient NewClient failed", zap.Error(err))
+log.Warn("IndexCoordClient NewClient failed", zap.Error(err))
 return nil, err
 }
 ClientParams.InitOnce(typeutil.IndexCoordRole)

@@ -102,13 +102,13 @@ func (c *Client) getIndexCoordAddr() (string, error) {
 key := c.grpcClient.GetRole()
 msess, _, err := c.sess.GetSessions(key)
 if err != nil {
-log.Debug("IndexCoordClient GetSessions failed", zap.Any("key", key), zap.Error(err))
+log.Warn("IndexCoordClient GetSessions failed", zap.Any("key", key), zap.Error(err))
 return "", err
 }
-log.Debug("IndexCoordClient GetSessions success", zap.Any("key", key), zap.Any("msess", msess))
+log.Info("IndexCoordClient GetSessions success", zap.Any("key", key), zap.Any("msess", msess))
 ms, ok := msess[key]
 if !ok {
-log.Debug("IndexCoordClient msess key not existed", zap.Any("key", key), zap.Any("len of msess", len(msess)))
+log.Warn("IndexCoordClient msess key not existed", zap.Any("key", key), zap.Any("len of msess", len(msess)))
 return "", fmt.Errorf("find no available indexcoord, check indexcoord state")
 }
 return ms.Address, nil
@@ -81,11 +81,11 @@ func (s *Server) Run() error {
 if err := s.init(); err != nil {
 return err
 }
-log.Debug("IndexCoord init done ...")
+log.Info("IndexCoord init done ...")
 if err := s.start(); err != nil {
 return err
 }
-log.Debug("IndexCoord start done ...")
+log.Info("IndexCoord start done ...")
 return nil
 }

@@ -102,7 +102,7 @@ func (s *Server) init() error {
 etcdCli, err := etcd.GetEtcdClient(&indexcoord.Params.EtcdCfg)
 if err != nil {
-log.Debug("IndexCoord connect to etcd failed", zap.Error(err))
+log.Warn("IndexCoord connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli
@@ -124,23 +124,23 @@ func (s *Server) init() error {
 if s.rootCoord == nil {
 s.rootCoord, err = rcc.NewClient(s.loopCtx, ic.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err != nil {
-log.Debug("IndexCoord try to new RootCoord client failed", zap.Error(err))
+log.Error("IndexCoord try to new RootCoord client failed", zap.Error(err))
 panic(err)
 }
 }
 if err = s.rootCoord.Init(); err != nil {
-log.Debug("IndexCoord RootCoord client init failed", zap.Error(err))
+log.Error("IndexCoord RootCoord client init failed", zap.Error(err))
 panic(err)
 }
 if err = s.rootCoord.Start(); err != nil {
-log.Debug("IndexCoord RootCoord client start failed", zap.Error(err))
+log.Error("IndexCoord RootCoord client start failed", zap.Error(err))
 panic(err)
 }
-log.Debug("IndexCoord try to wait for RootCoord ready")
+log.Info("IndexCoord try to wait for RootCoord ready")
 err = funcutil.WaitForComponentHealthy(s.loopCtx, s.rootCoord, typeutil.RootCoordRole, 1000000, time.Millisecond*200)
 if err != nil {
-log.Debug("IndexCoord wait for RootCoord ready failed", zap.Error(err))
+log.Error("IndexCoord wait for RootCoord ready failed", zap.Error(err))
 panic(err)
 }

@@ -152,23 +152,23 @@ func (s *Server) init() error {
 if s.dataCoord == nil {
 s.dataCoord, err = dcc.NewClient(s.loopCtx, ic.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err != nil {
-log.Debug("IndexCoord try to new DataCoord client failed", zap.Error(err))
+log.Info("IndexCoord try to new DataCoord client failed", zap.Error(err))
 panic(err)
 }
 }
 if err = s.dataCoord.Init(); err != nil {
-log.Debug("IndexCoord DataCoordClient Init failed", zap.Error(err))
+log.Error("IndexCoord DataCoordClient Init failed", zap.Error(err))
 panic(err)
 }
 if err = s.dataCoord.Start(); err != nil {
-log.Debug("IndexCoord DataCoordClient Start failed", zap.Error(err))
+log.Error("IndexCoord DataCoordClient Start failed", zap.Error(err))
 panic(err)
 }
-log.Debug("IndexCoord try to wait for DataCoord ready")
+log.Info("IndexCoord try to wait for DataCoord ready")
 err = funcutil.WaitForComponentHealthy(s.loopCtx, s.dataCoord, typeutil.DataCoordRole, 1000000, time.Millisecond*200)
 if err != nil {
-log.Debug("IndexCoord wait for DataCoord ready failed", zap.Error(err))
+log.Warn("IndexCoord wait for DataCoord ready failed", zap.Error(err))
 panic(err)
 }
@@ -184,18 +184,18 @@ func (s *Server) start() error {
 if err := s.indexcoord.Start(); err != nil {
 return err
 }
-log.Debug("indexCoord started")
+log.Info("indexCoord started")
 if err := s.indexcoord.Register(); err != nil {
 log.Error("IndexCoord", zap.Any("register session error", err))
 return err
 }
-log.Debug("IndexCoord registers service successfully")
+log.Info("IndexCoord registers service successfully")
 return nil
 }

 // Stop stops IndexCoord's grpc service.
 func (s *Server) Stop() error {
-log.Debug("Indexcoord stop", zap.String("Address", Params.GetAddress()))
+log.Info("Indexcoord stop", zap.String("Address", Params.GetAddress()))
 if s.closer != nil {
 if err := s.closer.Close(); err != nil {
 return err

@@ -209,7 +209,7 @@ func (s *Server) Stop() error {
 }
 s.loopCancel()
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 s.grpcServer.GracefulStop()
 }
@@ -308,7 +308,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 }
-log.Debug("IndexCoord", zap.String("network address", Params.IP), zap.Int("network port", grpcPort))
+log.Info("IndexCoord", zap.String("network address", Params.IP), zap.Int("network port", grpcPort))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
 if err != nil {
 log.Warn("IndexCoord", zap.String("GrpcServer:failed to listen", err.Error()))

@@ -337,7 +337,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 if err := s.grpcServer.Serve(lis); err != nil {
 s.grpcErrChan <- err
 }
-log.Debug("IndexCoord grpcServer loop exit")
+log.Info("IndexCoord grpcServer loop exit")
 }

 // NewServer create a new IndexCoord grpc server.
@@ -70,11 +70,11 @@ func (s *Server) Run() error {
 if err := s.init(); err != nil {
 return err
 }
-log.Debug("IndexNode init done ...")
+log.Info("IndexNode init done ...")
 if err := s.start(); err != nil {
 return err
 }
-log.Debug("IndexNode start done ...")
+log.Info("IndexNode start done ...")
 return nil
 }

@@ -82,7 +82,7 @@ func (s *Server) Run() error {
 func (s *Server) startGrpcLoop(grpcPort int) {
 defer s.loopWg.Done()
-log.Debug("IndexNode", zap.String("network address", Params.GetAddress()), zap.Int("network port: ", grpcPort))
+log.Info("IndexNode", zap.String("network address", Params.GetAddress()), zap.Int("network port: ", grpcPort))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
 if err != nil {
 log.Warn("IndexNode", zap.String("GrpcServer:failed to listen", err.Error()))

@@ -158,7 +158,7 @@ func (s *Server) init() error {
 etcdCli, err := etcd.GetEtcdClient(&indexnode.Params.EtcdCfg)
 if err != nil {
-log.Debug("IndexNode connect to etcd failed", zap.Error(err))
+log.Warn("IndexNode connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli
@@ -183,13 +183,13 @@ func (s *Server) start() error {
 log.Error("IndexNode Register etcd failed", zap.Error(err))
 return err
 }
-log.Debug("IndexNode Register etcd success")
+log.Info("IndexNode Register etcd success")
 return nil
 }

 // Stop stops IndexNode's grpc service.
 func (s *Server) Stop() error {
-log.Debug("IndexNode stop", zap.String("Address", Params.GetAddress()))
+log.Info("IndexNode stop", zap.String("Address", Params.GetAddress()))
 if s.closer != nil {
 if err := s.closer.Close(); err != nil {
 return err

@@ -203,7 +203,7 @@ func (s *Server) Stop() error {
 defer s.etcdCli.Close()
 }
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 s.grpcServer.GracefulStop()
 }
 s.loopWg.Wait()
@@ -151,14 +151,14 @@ func (s *Server) startExternalGrpc(grpcPort int, errChan chan error) {
 Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 }
-log.Debug("Proxy server listen on tcp", zap.Int("port", grpcPort))
+log.Info("Proxy server listen on tcp", zap.Int("port", grpcPort))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
 if err != nil {
 log.Warn("Proxy server failed to listen on", zap.Error(err), zap.Int("port", grpcPort))
 errChan <- err
 return
 }
-log.Debug("Proxy server already listen on tcp", zap.Int("port", grpcPort))
+log.Info("Proxy server already listen on tcp", zap.Int("port", grpcPort))
 limiter, err := s.proxy.GetRateLimiter()
 if err != nil {

@@ -166,7 +166,7 @@ func (s *Server) startExternalGrpc(grpcPort int, errChan chan error) {
 errChan <- err
 return
 }
-log.Debug("Get proxy rate limiter done", zap.Int("port", grpcPort))
+log.Info("Get proxy rate limiter done", zap.Int("port", grpcPort))
 opts := trace.GetInterceptorOpts()
 grpcOpts := []grpc.ServerOption{

@@ -227,7 +227,7 @@ func (s *Server) startExternalGrpc(grpcPort int, errChan chan error) {
 grpc_health_v1.RegisterHealthServer(s.grpcExternalServer, s)
 errChan <- nil
-log.Debug("create Proxy grpc server",
+log.Info("create Proxy grpc server",
 zap.Any("enforcement policy", kaep),
 zap.Any("server parameters", kasp))
@@ -250,14 +250,14 @@ func (s *Server) startInternalGrpc(grpcPort int, errChan chan error) {
 Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 }
-log.Debug("Proxy internal server listen on tcp", zap.Int("port", grpcPort))
+log.Info("Proxy internal server listen on tcp", zap.Int("port", grpcPort))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
 if err != nil {
 log.Warn("Proxy internal server failed to listen on", zap.Error(err), zap.Int("port", grpcPort))
 errChan <- err
 return
 }
-log.Debug("Proxy internal server already listen on tcp", zap.Int("port", grpcPort))
+log.Info("Proxy internal server already listen on tcp", zap.Int("port", grpcPort))
 opts := trace.GetInterceptorOpts()
 s.grpcInternalServer = grpc.NewServer(

@@ -275,7 +275,7 @@ func (s *Server) startInternalGrpc(grpcPort int, errChan chan error) {
 grpc_health_v1.RegisterHealthServer(s.grpcInternalServer, s)
 errChan <- nil
-log.Debug("create Proxy internal grpc server",
+log.Info("create Proxy internal grpc server",
 zap.Any("enforcement policy", kaep),
 zap.Any("server parameters", kasp))
@@ -288,28 +288,27 @@ func (s *Server) startInternalGrpc(grpcPort int, errChan chan error) {
 // Start start the Proxy Server
 func (s *Server) Run() error {
-log.Debug("init Proxy server")
+log.Info("init Proxy server")
 if err := s.init(); err != nil {
 log.Warn("init Proxy server failed", zap.Error(err))
 return err
 }
-log.Debug("init Proxy server done")
-log.Debug("start Proxy server")
+log.Info("start Proxy server")
 if err := s.start(); err != nil {
 log.Warn("start Proxy server failed", zap.Error(err))
 return err
 }
-log.Debug("start Proxy server done")
+log.Info("start Proxy server done")
 return nil
 }

 func (s *Server) init() error {
 Params.InitOnce(typeutil.ProxyRole)
-log.Debug("Proxy init service's parameter table done")
+log.Info("Proxy init service's parameter table done")
 HTTPParams.InitOnce()
-log.Debug("Proxy init http server's parameter table done")
+log.Info("Proxy init http server's parameter table done")
 if !funcutil.CheckPortAvailable(Params.Port) {
 Params.Port = funcutil.GetAvailablePort()
@@ -318,16 +317,16 @@ func (s *Server) init() error {
 proxy.Params.InitOnce()
 proxy.Params.ProxyCfg.NetworkAddress = Params.GetInternalAddress()
-log.Debug("init Proxy's parameter table done", zap.String("internal address", Params.GetInternalAddress()), zap.String("external address", Params.GetAddress()))
+log.Info("init Proxy's parameter table done", zap.String("internal address", Params.GetInternalAddress()), zap.String("external address", Params.GetAddress()))
 serviceName := fmt.Sprintf("Proxy ip: %s, port: %d", Params.IP, Params.Port)
 closer := trace.InitTracing(serviceName)
 s.closer = closer
-log.Debug("init Proxy's tracer done", zap.String("service name", serviceName))
+log.Info("init Proxy's tracer done", zap.String("service name", serviceName))
 etcdCli, err := etcd.GetEtcdClient(&proxy.Params.EtcdCfg)
 if err != nil {
-log.Debug("Proxy connect to etcd failed", zap.Error(err))
+log.Warn("Proxy connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli
@@ -358,129 +357,121 @@ func (s *Server) init() error {
 if s.rootCoordClient == nil {
 var err error
-log.Debug("create RootCoord client for Proxy")
+log.Info("create RootCoord client for Proxy")
 s.rootCoordClient, err = rcc.NewClient(s.ctx, proxy.Params.EtcdCfg.MetaRootPath, etcdCli)
 if err != nil {
 log.Warn("failed to create RootCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("create RootCoord client for Proxy done")
+log.Info("create RootCoord client for Proxy done")
 }
-log.Debug("init RootCoord client for Proxy")
+log.Info("init RootCoord client for Proxy")
 if err := s.rootCoordClient.Init(); err != nil {
 log.Warn("failed to init RootCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("init RootCoord client for Proxy done")
+log.Info("init RootCoord client for Proxy done")
-log.Debug("Proxy wait for RootCoord to be healthy")
+log.Info("Proxy wait for RootCoord to be healthy")
 if err := funcutil.WaitForComponentHealthy(s.ctx, s.rootCoordClient, "RootCoord", 1000000, time.Millisecond*200); err != nil {
 log.Warn("Proxy failed to wait for RootCoord to be healthy", zap.Error(err))
 return err
 }
-log.Debug("Proxy wait for RootCoord to be healthy done")
+log.Info("Proxy wait for RootCoord to be healthy done")
-log.Debug("set RootCoord client for Proxy")
 s.proxy.SetRootCoordClient(s.rootCoordClient)
-log.Debug("set RootCoord client for Proxy done")
+log.Info("set RootCoord client for Proxy done")
 if s.dataCoordClient == nil {
 var err error
-log.Debug("create DataCoord client for Proxy")
+log.Info("create DataCoord client for Proxy")
 s.dataCoordClient, err = dcc.NewClient(s.ctx, proxy.Params.EtcdCfg.MetaRootPath, etcdCli)
 if err != nil {
 log.Warn("failed to create DataCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("create DataCoord client for Proxy done")
+log.Info("create DataCoord client for Proxy done")
 }
-log.Debug("init DataCoord client for Proxy")
+log.Info("init DataCoord client for Proxy")
 if err := s.dataCoordClient.Init(); err != nil {
 log.Warn("failed to init DataCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("init DataCoord client for Proxy done")
-log.Debug("Proxy wait for DataCoord to be healthy")
+log.Info("Proxy wait for DataCoord to be healthy")
 if err := funcutil.WaitForComponentHealthy(s.ctx, s.dataCoordClient, "DataCoord", 1000000, time.Millisecond*200); err != nil {
 log.Warn("Proxy failed to wait for DataCoord to be healthy", zap.Error(err))
 return err
 }
-log.Debug("Proxy wait for DataCoord to be healthy done")
+log.Info("Proxy wait for DataCoord to be healthy done")
-log.Debug("set DataCoord client for Proxy")
 s.proxy.SetDataCoordClient(s.dataCoordClient)
-log.Debug("set DataCoord client for Proxy done")
+log.Info("set DataCoord client for Proxy done")
 if s.indexCoordClient == nil {
 var err error
-log.Debug("create IndexCoord client for Proxy")
+log.Info("create IndexCoord client for Proxy")
 s.indexCoordClient, err = icc.NewClient(s.ctx, proxy.Params.EtcdCfg.MetaRootPath, etcdCli)
 if err != nil {
 log.Warn("failed to create IndexCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("create IndexCoord client for Proxy done")
+log.Info("create IndexCoord client for Proxy done")
 }
-log.Debug("init IndexCoord client for Proxy")
+log.Info("init IndexCoord client for Proxy")
 if err := s.indexCoordClient.Init(); err != nil {
 log.Warn("failed to init IndexCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("init IndexCoord client for Proxy done")
+log.Info("init IndexCoord client for Proxy done")
-log.Debug("Proxy wait for IndexCoord to be healthy")
 if err := funcutil.WaitForComponentHealthy(s.ctx, s.indexCoordClient, "IndexCoord", 1000000, time.Millisecond*200); err != nil {
 log.Warn("Proxy failed to wait for IndexCoord to be healthy", zap.Error(err))
 return err
 }
-log.Debug("Proxy wait for IndexCoord to be healthy done")
+log.Info("Proxy wait for IndexCoord to be healthy done")
-log.Debug("set IndexCoord client for Proxy")
 s.proxy.SetIndexCoordClient(s.indexCoordClient)
-log.Debug("set IndexCoord client for Proxy done")
+log.Info("set IndexCoord client for Proxy done")
 if s.queryCoordClient == nil {
 var err error
-log.Debug("create QueryCoord client for Proxy")
+log.Info("create QueryCoord client for Proxy")
 s.queryCoordClient, err = qcc.NewClient(s.ctx, proxy.Params.EtcdCfg.MetaRootPath, etcdCli)
 if err != nil {
 log.Warn("failed to create QueryCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("create QueryCoord client for Proxy done")
+log.Info("create QueryCoord client for Proxy done")
 }
-log.Debug("init QueryCoord client for Proxy")
+log.Info("init QueryCoord client for Proxy")
 if err := s.queryCoordClient.Init(); err != nil {
 log.Warn("failed to init QueryCoord client for Proxy", zap.Error(err))
 return err
 }
-log.Debug("init QueryCoord client for Proxy done")
+log.Info("init QueryCoord client for Proxy done")
-log.Debug("Proxy wait for QueryCoord to be healthy")
 if err := funcutil.WaitForComponentHealthy(s.ctx, s.queryCoordClient, "QueryCoord", 1000000, time.Millisecond*200); err != nil {
 log.Warn("Proxy failed to wait for QueryCoord to be healthy", zap.Error(err))
 return err
 }
-log.Debug("Proxy wait for QueryCoord to be healthy done")
+log.Info("Proxy wait for QueryCoord to be healthy done")
-log.Debug("set QueryCoord client for Proxy")
 s.proxy.SetQueryCoordClient(s.queryCoordClient)
-log.Debug("set QueryCoord client for Proxy done")
+log.Info("set QueryCoord client for Proxy done")
-log.Debug(fmt.Sprintf("update Proxy's state to %s", commonpb.StateCode_Initializing.String()))
+log.Info(fmt.Sprintf("update Proxy's state to %s", commonpb.StateCode_Initializing.String()))
 s.proxy.UpdateStateCode(commonpb.StateCode_Initializing)
-log.Debug("init Proxy")
+log.Info("init Proxy")
 if err := s.proxy.Init(); err != nil {
 log.Warn("failed to init Proxy", zap.Error(err))
 return err
 }
-log.Debug("init Proxy done")
+log.Info("init Proxy done")
 // Intentionally print to stdout, which is usually a sign that Milvus is ready to serve.
 fmt.Println("---Milvus Proxy successfully initialized and ready to serve!---")
@@ -503,7 +494,7 @@ func (s *Server) start() error {
 // Stop stop the Proxy Server
 func (s *Server) Stop() error {
-log.Debug("Proxy stop", zap.String("internal address", Params.GetInternalAddress()), zap.String("external address", Params.GetInternalAddress()))
+log.Info("Proxy stop", zap.String("internal address", Params.GetInternalAddress()), zap.String("external address", Params.GetInternalAddress()))
 var err error
 if s.closer != nil {
 if err = s.closer.Close(); err != nil {

@@ -521,11 +512,11 @@ func (s *Server) Stop() error {
 go func() {
 defer gracefulWg.Done()
 if s.grpcInternalServer != nil {
-log.Debug("Graceful stop grpc internal server...")
+log.Info("Graceful stop grpc internal server...")
 s.grpcInternalServer.GracefulStop()
 }
 if s.grpcExternalServer != nil {
-log.Debug("Graceful stop grpc external server...")
+log.Info("Graceful stop grpc external server...")
 s.grpcExternalServer.GracefulStop()
 }
 }()
@@ -51,7 +51,7 @@ func NewClient(ctx context.Context, metaRoot string, etcdCli *clientv3.Client) (
 sess := sessionutil.NewSession(ctx, metaRoot, etcdCli)
 if sess == nil {
 err := fmt.Errorf("new session error, maybe can not connect to etcd")
-log.Debug("QueryCoordClient NewClient failed", zap.Error(err))
+log.Warn("QueryCoordClient NewClient failed", zap.Error(err))
 return nil, err
 }
 ClientParams.InitOnce(typeutil.QueryCoordRole)

@@ -86,12 +86,12 @@ func (c *Client) getQueryCoordAddr() (string, error) {
 key := c.grpcClient.GetRole()
 msess, _, err := c.sess.GetSessions(key)
 if err != nil {
-log.Debug("QueryCoordClient GetSessions failed", zap.Error(err))
+log.Warn("QueryCoordClient GetSessions failed", zap.Error(err))
 return "", err
 }
 ms, ok := msess[key]
 if !ok {
-log.Debug("QueryCoordClient msess key not existed", zap.Any("key", key))
+log.Warn("QueryCoordClient msess key not existed", zap.Any("key", key))
 return "", fmt.Errorf("find no available querycoord, check querycoord state")
 }
 return ms.Address, nil
@@ -98,12 +98,12 @@ func (s *Server) Run() error {
 if err := s.init(); err != nil {
 return err
 }
-log.Debug("QueryCoord init done ...")
+log.Info("QueryCoord init done ...")
 if err := s.start(); err != nil {
 return err
 }
-log.Debug("QueryCoord start done ...")
+log.Info("QueryCoord start done ...")
 return nil
 }

@@ -120,7 +120,7 @@ func (s *Server) init() error {
 etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
 if err != nil {
-log.Debug("QueryCoord connect to etcd failed", zap.Error(err))
+log.Warn("QueryCoord connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli
@@ -138,94 +138,94 @@ func (s *Server) init() error {
 if s.rootCoord == nil {
 s.rootCoord, err = rcc.NewClient(s.loopCtx, qc.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err != nil {
-log.Debug("QueryCoord try to new RootCoord client failed", zap.Error(err))
+log.Warn("QueryCoord try to new RootCoord client failed", zap.Error(err))
 panic(err)
 }
 }
 if err = s.rootCoord.Init(); err != nil {
-log.Debug("QueryCoord RootCoordClient Init failed", zap.Error(err))
+log.Warn("QueryCoord RootCoordClient Init failed", zap.Error(err))
 panic(err)
 }
 if err = s.rootCoord.Start(); err != nil {
-log.Debug("QueryCoord RootCoordClient Start failed", zap.Error(err))
+log.Warn("QueryCoord RootCoordClient Start failed", zap.Error(err))
 panic(err)
 }
 // wait for master init or healthy
-log.Debug("QueryCoord try to wait for RootCoord ready")
+log.Info("QueryCoord try to wait for RootCoord ready")
 err = funcutil.WaitForComponentHealthy(s.loopCtx, s.rootCoord, "RootCoord", 1000000, time.Millisecond*200)
 if err != nil {
-log.Debug("QueryCoord wait for RootCoord ready failed", zap.Error(err))
+log.Warn("QueryCoord wait for RootCoord ready failed", zap.Error(err))
 panic(err)
 }
 if err := s.SetRootCoord(s.rootCoord); err != nil {
 panic(err)
 }
-log.Debug("QueryCoord report RootCoord ready")
+log.Info("QueryCoord report RootCoord ready")
 // --- Data service client ---
 if s.dataCoord == nil {
 s.dataCoord, err = dcc.NewClient(s.loopCtx, qc.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err != nil {
-log.Debug("QueryCoord try to new DataCoord client failed", zap.Error(err))
+log.Warn("QueryCoord try to new DataCoord client failed", zap.Error(err))
 panic(err)
 }
 }
 if err = s.dataCoord.Init(); err != nil {
-log.Debug("QueryCoord DataCoordClient Init failed", zap.Error(err))
+log.Warn("QueryCoord DataCoordClient Init failed", zap.Error(err))
 panic(err)
 }
 if err = s.dataCoord.Start(); err != nil {
-log.Debug("QueryCoord DataCoordClient Start failed", zap.Error(err))
+log.Warn("QueryCoord DataCoordClient Start failed", zap.Error(err))
 panic(err)
 }
-log.Debug("QueryCoord try to wait for DataCoord ready")
+log.Info("QueryCoord try to wait for DataCoord ready")
 err = funcutil.WaitForComponentHealthy(s.loopCtx, s.dataCoord, "DataCoord", 1000000, time.Millisecond*200)
 if err != nil {
-log.Debug("QueryCoord wait for DataCoord ready failed", zap.Error(err))
+log.Warn("QueryCoord wait for DataCoord ready failed", zap.Error(err))
 panic(err)
 }
 if err := s.SetDataCoord(s.dataCoord); err != nil {
 panic(err)
 }
-log.Debug("QueryCoord report DataCoord ready")
+log.Info("QueryCoord report DataCoord ready")
 // --- IndexCoord ---
 if s.indexCoord == nil {
 s.indexCoord, err = icc.NewClient(s.loopCtx, qc.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err != nil {
-log.Debug("QueryCoord try to new IndexCoord client failed", zap.Error(err))
+log.Warn("QueryCoord try to new IndexCoord client failed", zap.Error(err))
 panic(err)
 }
 }
 if err := s.indexCoord.Init(); err != nil {
-log.Debug("QueryCoord IndexCoordClient Init failed", zap.Error(err))
+log.Warn("QueryCoord IndexCoordClient Init failed", zap.Error(err))
 panic(err)
 }
 if err := s.indexCoord.Start(); err != nil {
-log.Debug("QueryCoord IndexCoordClient Start failed", zap.Error(err))
+log.Warn("QueryCoord IndexCoordClient Start failed", zap.Error(err))
 panic(err)
 }
 // wait IndexCoord healthy
-log.Debug("QueryCoord try to wait for IndexCoord ready")
+log.Info("QueryCoord try to wait for IndexCoord ready")
 err = funcutil.WaitForComponentHealthy(s.loopCtx, s.indexCoord, "IndexCoord", 1000000, time.Millisecond*200)
 if err != nil {
-log.Debug("QueryCoord wait for IndexCoord ready failed", zap.Error(err))
+log.Warn("QueryCoord wait for IndexCoord ready failed", zap.Error(err))
 panic(err)
 }
-log.Debug("QueryCoord report IndexCoord is ready")
+log.Info("QueryCoord report IndexCoord is ready")
 if err := s.SetIndexCoord(s.indexCoord); err != nil {
 panic(err)
 }
 s.queryCoord.UpdateStateCode(commonpb.StateCode_Initializing)
-log.Debug("QueryCoord", zap.Any("State", commonpb.StateCode_Initializing))
+log.Info("QueryCoord", zap.Any("State", commonpb.StateCode_Initializing))
 if err := s.queryCoord.Init(); err != nil {
 return err
 }
@@ -244,10 +244,10 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 Time: 60 * time.Second, // Ping the client if it is idle for 60 seconds to ensure the connection is still active
 Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 }
-log.Debug("network", zap.String("port", strconv.Itoa(grpcPort)))
+log.Info("network", zap.String("port", strconv.Itoa(grpcPort)))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
 if err != nil {
-log.Debug("GrpcServer:failed to listen:", zap.String("error", err.Error()))
+log.Warn("GrpcServer:failed to listen:", zap.String("error", err.Error()))
 s.grpcErrChan <- err
 return
 }
@@ -286,7 +286,7 @@ func (s *Server) start() error {
 // Stop stops QueryCoord's grpc service.
 func (s *Server) Stop() error {
-log.Debug("QueryCoord stop", zap.String("Address", Params.GetAddress()))
+log.Info("QueryCoord stop", zap.String("Address", Params.GetAddress()))
 if s.closer != nil {
 if err := s.closer.Close(); err != nil {
 return err

@@ -298,7 +298,7 @@ func (s *Server) Stop() error {
 err := s.queryCoord.Stop()
 s.loopCancel()
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 s.grpcServer.GracefulStop()
 }
 return err
@@ -103,16 +103,16 @@ func (s *Server) init() error {
 closer := trace.InitTracing(fmt.Sprintf("query_node ip: %s, port: %d", Params.IP, Params.Port))
 s.closer = closer
-log.Debug("QueryNode", zap.Int("port", Params.Port))
+log.Info("QueryNode", zap.Int("port", Params.Port))
 etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
 if err != nil {
-log.Debug("QueryNode connect to etcd failed", zap.Error(err))
+log.Warn("QueryNode connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli
 s.SetEtcdClient(etcdCli)
-log.Debug("QueryNode connect to etcd successfully")
+log.Info("QueryNode connect to etcd successfully")
 s.wg.Add(1)
 go s.startGrpcLoop(Params.Port)
 // wait for grpc server loop start

@@ -122,7 +122,7 @@ func (s *Server) init() error {
 }
 s.querynode.UpdateStateCode(commonpb.StateCode_Initializing)
-log.Debug("QueryNode", zap.Any("State", commonpb.StateCode_Initializing))
+log.Info("QueryNode", zap.Any("State", commonpb.StateCode_Initializing))
 if err := s.querynode.Init(); err != nil {
 log.Error("QueryNode init error: ", zap.Error(err))
 return err

@@ -194,7 +194,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
 go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
 if err := s.grpcServer.Serve(lis); err != nil {
-log.Debug("QueryNode Start Grpc Failed!!!!")
+log.Warn("QueryNode Start Grpc Failed!!!!")
 s.grpcErrChan <- err
 }
@@ -206,18 +206,18 @@ func (s *Server) Run() error {
 if err := s.init(); err != nil {
 return err
 }
-log.Debug("QueryNode init done ...")
+log.Info("QueryNode init done ...")
 if err := s.start(); err != nil {
 return err
 }
-log.Debug("QueryNode start done ...")
+log.Info("QueryNode start done ...")
 return nil
 }

 // Stop stops QueryNode's grpc service.
 func (s *Server) Stop() error {
-log.Debug("QueryNode stop", zap.String("Address", Params.GetAddress()))
+log.Info("QueryNode stop", zap.String("Address", Params.GetAddress()))
 if s.closer != nil {
 if err := s.closer.Close(); err != nil {
 return err

@@ -229,7 +229,7 @@ func (s *Server) Stop() error {
 s.cancel()
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 s.grpcServer.GracefulStop()
 }
@@ -56,7 +56,7 @@ func NewClient(ctx context.Context, metaRoot string, etcdCli *clientv3.Client) (
 sess := sessionutil.NewSession(ctx, metaRoot, etcdCli)
 if sess == nil {
 err := fmt.Errorf("new session error, maybe can not connect to etcd")
-log.Debug("QueryCoordClient NewClient failed", zap.Error(err))
+log.Warn("QueryCoordClient NewClient failed", zap.Error(err))
 return nil, err
 }
 ClientParams.InitOnce(typeutil.RootCoordRole)

@@ -95,7 +95,7 @@ func (c *Client) getRootCoordAddr() (string, error) {
 key := c.grpcClient.GetRole()
 msess, _, err := c.sess.GetSessions(key)
 if err != nil {
-log.Debug("RootCoordClient GetSessions failed", zap.Any("key", key))
+log.Warn("RootCoordClient GetSessions failed", zap.Any("key", key))
 return "", err
 }
 ms, ok := msess[key]

@@ -103,7 +103,7 @@ func (c *Client) getRootCoordAddr() (string, error) {
 log.Warn("RootCoordClient mess key not exist", zap.Any("key", key))
 return "", fmt.Errorf("find no available rootcoord, check rootcoord state")
 }
-log.Debug("RootCoordClient GetSessions success", zap.String("address", ms.Address))
+log.Info("RootCoordClient GetSessions success", zap.String("address", ms.Address))
 return ms.Address, nil
 }
@@ -142,12 +142,12 @@ func (s *Server) Run() error {
 if err := s.init(); err != nil {
 return err
 }
-log.Debug("RootCoord init done ...")
+log.Info("RootCoord init done ...")
 if err := s.start(); err != nil {
 return err
 }
-log.Debug("RootCoord start done ...")
+log.Info("RootCoord start done ...")
 return nil
 }
@@ -157,31 +157,31 @@ func (s *Server) init() error {
 rootcoord.Params.InitOnce()
 rootcoord.Params.RootCoordCfg.Address = Params.GetAddress()
 rootcoord.Params.RootCoordCfg.Port = Params.Port
-log.Debug("init params done..")
+log.Info("init params done..")
 closer := trace.InitTracing("root_coord")
 s.closer = closer
 etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
 if err != nil {
-log.Debug("RootCoord connect to etcd failed", zap.Error(err))
+log.Warn("RootCoord connect to etcd failed", zap.Error(err))
 return err
 }
 s.etcdCli = etcdCli
 s.rootCoord.SetEtcdClient(s.etcdCli)
-log.Debug("etcd connect done ...")
+log.Info("etcd connect done ...")
 err = s.startGrpc(Params.Port)
 if err != nil {
 return err
 }
-log.Debug("grpc init done ...")
+log.Info("grpc init done ...")
 s.rootCoord.UpdateStateCode(commonpb.StateCode_Initializing)
-log.Debug("RootCoord", zap.Any("State", commonpb.StateCode_Initializing))
+log.Info("RootCoord", zap.Any("State", commonpb.StateCode_Initializing))
 if s.newDataCoordClient != nil {
-log.Debug("RootCoord start to create DataCoord client")
+log.Info("RootCoord start to create DataCoord client")
 dataCoord := s.newDataCoordClient(rootcoord.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err := s.rootCoord.SetDataCoord(s.ctx, dataCoord); err != nil {
 panic(err)

@@ -189,7 +189,7 @@ func (s *Server) init() error {
 s.dataCoord = dataCoord
 }
 if s.newIndexCoordClient != nil {
-log.Debug("RootCoord start to create IndexCoord client")
+log.Info("RootCoord start to create IndexCoord client")
 indexCoord := s.newIndexCoordClient(rootcoord.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err := s.rootCoord.SetIndexCoord(indexCoord); err != nil {
 panic(err)

@@ -197,7 +197,7 @@ func (s *Server) init() error {
 s.indexCoord = indexCoord
 }
 if s.newQueryCoordClient != nil {
-log.Debug("RootCoord start to create QueryCoord client")
+log.Info("RootCoord start to create QueryCoord client")
 queryCoord := s.newQueryCoordClient(rootcoord.Params.EtcdCfg.MetaRootPath, s.etcdCli)
 if err := s.rootCoord.SetQueryCoord(queryCoord); err != nil {
 panic(err)
@@ -227,7 +227,7 @@ func (s *Server) startGrpcLoop(port int) {
 Time: 60 * time.Second, // Ping the client if it is idle for 60 seconds to ensure the connection is still active
 Timeout: 10 * time.Second, // Wait 10 second for the ping ack before assuming the connection is dead
 }
-log.Debug("start grpc ", zap.Int("port", port))
+log.Info("start grpc ", zap.Int("port", port))
 lis, err := net.Listen("tcp", ":"+strconv.Itoa(port))
 if err != nil {
 log.Error("GrpcServer:failed to listen", zap.String("error", err.Error()))

@@ -259,7 +259,7 @@ func (s *Server) startGrpcLoop(port int) {
 }

 func (s *Server) start() error {
-log.Debug("RootCoord Core start ...")
+log.Info("RootCoord Core start ...")
 if err := s.rootCoord.Start(); err != nil {
 log.Error(err.Error())
 return err

@@ -273,7 +273,7 @@ func (s *Server) start() error {
 }

 func (s *Server) Stop() error {
-log.Debug("Rootcoord stop", zap.String("Address", Params.GetAddress()))
+log.Info("Rootcoord stop", zap.String("Address", Params.GetAddress()))
 if s.closer != nil {
 if err := s.closer.Close(); err != nil {
 log.Error("Failed to close opentracing", zap.Error(err))
@@ -302,10 +302,10 @@ func (s *Server) Stop() error {
 log.Error("Failed to close close rootCoord", zap.Error(err))
 }
 }
-log.Debug("Rootcoord begin to stop grpc server")
+log.Info("Rootcoord begin to stop grpc server")
 s.cancel()
 if s.grpcServer != nil {
-log.Debug("Graceful stop grpc server...")
+log.Info("Graceful stop grpc server...")
 s.grpcServer.GracefulStop()
 }
 s.wg.Wait()

@@ -218,7 +218,7 @@ func (hd *handoff) process(segID UniqueID) {
 return
 }
 if info.IsImporting {
-log.Debug("segment is importing, can't write handoff event", zap.Int64("segID", segID))
+log.Info("segment is importing, can't write handoff event", zap.Int64("segID", segID))
 return
 }
 if hd.allParentsDone(info.CompactionFrom) {
@@ -174,7 +174,7 @@ func (i *IndexCoord) Init() error {
 Params.InitOnce()
 i.initOnce.Do(func() {
 i.UpdateStateCode(commonpb.StateCode_Initializing)
-log.Debug("IndexCoord init", zap.Any("stateCode", i.stateCode.Load().(commonpb.StateCode)))
+log.Info("IndexCoord init", zap.Any("stateCode", i.stateCode.Load().(commonpb.StateCode)))
 i.factory.Init(&Params)

@@ -190,7 +190,7 @@ func (i *IndexCoord) Init() error {
 i.metaTable, err = NewMetaTable(i.etcdKV)
 return err
 }
-log.Debug("IndexCoord try to connect etcd")
+log.Info("IndexCoord try to connect etcd")
 err = retry.Do(i.loopCtx, connectEtcdFn, retry.Attempts(100))
 if err != nil {
 log.Error("IndexCoord try to connect etcd failed", zap.Error(err))

@@ -198,11 +198,11 @@ func (i *IndexCoord) Init() error {
 return
 }
-log.Debug("IndexCoord try to connect etcd success")
+log.Info("IndexCoord try to connect etcd success")
 i.nodeManager = NewNodeManager(i.loopCtx)
 sessions, revision, err := i.session.GetSessions(typeutil.IndexNodeRole)
-log.Debug("IndexCoord", zap.Int("session number", len(sessions)), zap.Int64("revision", revision))
+log.Info("IndexCoord", zap.Int("session number", len(sessions)), zap.Int64("revision", revision))
 if err != nil {
 log.Error("IndexCoord Get IndexNode Sessions error", zap.Error(err))
 initErr = err
@@ -218,7 +218,7 @@ func (i *IndexCoord) Init() error {
 initErr = err
 return
 }
-log.Debug("IndexCoord add node success", zap.String("IndexNode address", Params.IndexCoordCfg.IndexNodeAddress),
+log.Info("IndexCoord add node success", zap.String("IndexNode address", Params.IndexCoordCfg.IndexNodeAddress),
 zap.Int64("nodeID", Params.IndexCoordCfg.IndexNodeID))
 aliveNodeID = append(aliveNodeID, Params.IndexCoordCfg.IndexNodeID)
 metrics.IndexCoordIndexNodeNum.WithLabelValues().Inc()

@@ -233,7 +233,7 @@ func (i *IndexCoord) Init() error {
 aliveNodeID = append(aliveNodeID, session.ServerID)
 }
 }
-log.Debug("IndexCoord", zap.Int("IndexNode number", len(i.nodeManager.GetAllClients())))
+log.Info("IndexCoord", zap.Int("IndexNode number", len(i.nodeManager.GetAllClients())))
 i.indexBuilder = newIndexBuilder(i.loopCtx, i, i.metaTable, aliveNodeID)
 // TODO silverxia add Rewatch logic

@@ -245,7 +245,7 @@ func (i *IndexCoord) Init() error {
 initErr = err
 return
 }
-log.Debug("IndexCoord new minio chunkManager success")
+log.Info("IndexCoord new minio chunkManager success")
 i.chunkManager = chunkManager
 i.garbageCollector = newGarbageCollector(i.loopCtx, i.metaTable, i.chunkManager, i)
@@ -262,12 +262,12 @@ func (i *IndexCoord) Init() error {
 initErr = err
 return
 }
-log.Debug("IndexCoord new task scheduler success")
+log.Info("IndexCoord new task scheduler success")
 i.metricsCacheManager = metricsinfo.NewMetricsCacheManager()
 })
-log.Debug("IndexCoord init finished", zap.Error(initErr))
+log.Info("IndexCoord init finished", zap.Error(initErr))
 return initErr
 }

@@ -436,7 +436,7 @@ func (i *IndexCoord) CreateIndex(ctx context.Context, req *indexpb.CreateIndexRe
 Reason: msgIndexCoordIsUnhealthy(i.serverID),
 }, nil
 }
-log.Debug("IndexCoord receive create index request", zap.Int64("CollectionID", req.CollectionID),
+log.Info("IndexCoord receive create index request", zap.Int64("CollectionID", req.CollectionID),
 zap.String("IndexName", req.IndexName), zap.Int64("fieldID", req.FieldID),
 zap.Any("TypeParams", req.TypeParams),
 zap.Any("IndexParams", req.IndexParams))

@@ -470,7 +470,7 @@ func (i *IndexCoord) CreateIndex(ctx context.Context, req *indexpb.CreateIndexRe
 ret.Reason = err.Error()
 return ret, nil
 }
-log.Debug("IndexCoord create index enqueue successfully", zap.Int64("IndexID", t.indexID))
+log.Info("IndexCoord create index enqueue successfully", zap.Int64("IndexID", t.indexID))
 err = t.WaitToFinish()
 if err != nil {

@@ -617,7 +617,7 @@ func (i *IndexCoord) completeIndexInfo(indexInfo *indexpb.IndexInfo, segIDs []Un
 indexInfo.IndexedRows = i.metaTable.GetIndexBuildProgress(indexID, segIDs)
 }
-log.Debug("IndexCoord completeIndexInfo success", zap.Int64("collID", collectionID),
+log.Info("IndexCoord completeIndexInfo success", zap.Int64("collID", collectionID),
 zap.Int64("totalRows", indexInfo.TotalRows), zap.Int64("indexRows", indexInfo.IndexedRows),
 zap.Any("state", indexInfo.State), zap.String("failReason", indexInfo.IndexStateFailReason))
 return nil

@@ -825,7 +825,7 @@ func (i *IndexCoord) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInf
 // DescribeIndex describe the index info of the collection.
 func (i *IndexCoord) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRequest) (*indexpb.DescribeIndexResponse, error) {
-log.Debug("IndexCoord DescribeIndex", zap.Int64("collectionID", req.CollectionID), zap.String("indexName", req.GetIndexName()))
+log.Info("IndexCoord DescribeIndex", zap.Int64("collectionID", req.CollectionID), zap.String("indexName", req.GetIndexName()))
 if !i.isHealthy() {
 log.Warn(msgIndexCoordIsUnhealthy(i.serverID))
 return &indexpb.DescribeIndexResponse{
@ -1057,7 +1057,6 @@ func (i *IndexCoord) watchNodeLoop() {
|
||||
|
||||
defer cancel()
|
||||
defer i.loopWg.Done()
|
||||
log.Debug("IndexCoord watchNodeLoop start")
|
||||
|
||||
for {
|
||||
select {
|
||||
@ -1078,11 +1077,10 @@ func (i *IndexCoord) watchNodeLoop() {
|
||||
if Params.IndexCoordCfg.BindIndexNodeMode {
|
||||
continue
|
||||
}
|
||||
log.Debug("IndexCoord watchNodeLoop event updated")
|
||||
switch event.EventType {
|
||||
case sessionutil.SessionAddEvent:
|
||||
serverID := event.Session.ServerID
|
||||
log.Debug("IndexCoord watchNodeLoop SessionAddEvent", zap.Int64("serverID", serverID),
|
||||
log.Info("IndexCoord watchNodeLoop SessionAddEvent", zap.Int64("serverID", serverID),
|
||||
zap.String("address", event.Session.Address))
|
||||
go func() {
|
||||
err := i.nodeManager.AddNode(serverID, event.Session.Address)
|
||||
@ -1093,7 +1091,7 @@ func (i *IndexCoord) watchNodeLoop() {
|
||||
i.metricsCacheManager.InvalidateSystemInfoMetrics()
|
||||
case sessionutil.SessionDelEvent:
|
||||
serverID := event.Session.ServerID
|
||||
log.Debug("IndexCoord watchNodeLoop SessionDelEvent", zap.Int64("serverID", serverID))
|
||||
log.Info("IndexCoord watchNodeLoop SessionDelEvent", zap.Int64("serverID", serverID))
|
||||
i.nodeManager.RemoveNode(serverID)
|
||||
// remove tasks on nodeID
|
||||
i.indexBuilder.nodeDown(serverID)
|
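The hunks above touch the session-watch loop that reacts to IndexNode membership changes (SessionAddEvent / SessionDelEvent). As a rough illustration of that pattern only — not the actual IndexCoord implementation — a minimal watcher over a channel of add/delete events could look like the sketch below; the event and manager types are hypothetical stand-ins.

package main

import "log"

// Hypothetical stand-ins for the session event and node manager types.
type EventType int

const (
	SessionAddEvent EventType = iota
	SessionDelEvent
)

type SessionEvent struct {
	Type     EventType
	ServerID int64
	Address  string
}

type nodeManager struct {
	clients map[int64]string // serverID -> address (placeholder for a real client)
}

func (m *nodeManager) AddNode(id int64, addr string) { m.clients[id] = addr }
func (m *nodeManager) RemoveNode(id int64)           { delete(m.clients, id) }

// watchNodeLoop consumes membership events until the channel is closed.
func watchNodeLoop(events <-chan SessionEvent, m *nodeManager) {
	for ev := range events {
		switch ev.Type {
		case SessionAddEvent:
			log.Printf("node added: id=%d addr=%s", ev.ServerID, ev.Address)
			m.AddNode(ev.ServerID, ev.Address)
		case SessionDelEvent:
			log.Printf("node removed: id=%d", ev.ServerID)
			m.RemoveNode(ev.ServerID)
		}
	}
}

func main() {
	events := make(chan SessionEvent, 2)
	m := &nodeManager{clients: map[int64]string{}}
	events <- SessionEvent{Type: SessionAddEvent, ServerID: 1, Address: "127.0.0.1:21121"}
	events <- SessionEvent{Type: SessionDelEvent, ServerID: 1}
	close(events)
	watchNodeLoop(events, m)
}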
||||
@ -1183,7 +1181,7 @@ func (i *IndexCoord) createIndexForSegment(segIdx *model.SegmentIndex) (bool, Un
|
||||
|
||||
hasIndex, indexBuildID := i.metaTable.HasSameIndex(segIdx.SegmentID, segIdx.IndexID)
|
||||
if hasIndex {
|
||||
log.Debug("IndexCoord has same index", zap.Int64("buildID", indexBuildID), zap.Int64("segmentID", segIdx.SegmentID))
|
||||
log.Info("IndexCoord has same index", zap.Int64("buildID", indexBuildID), zap.Int64("segmentID", segIdx.SegmentID))
|
||||
return true, indexBuildID, nil
|
||||
}
|
||||
|
||||
@ -1206,7 +1204,7 @@ func (i *IndexCoord) createIndexForSegment(segIdx *model.SegmentIndex) (bool, Un
|
||||
zap.Int64("segID", segIdx.SegmentID), zap.Error(err))
|
||||
return false, 0, err
|
||||
}
|
||||
log.Debug("IndexCoord createIndex Enqueue successfully", zap.Int64("collID", segIdx.CollectionID),
|
||||
log.Info("IndexCoord createIndex Enqueue successfully", zap.Int64("collID", segIdx.CollectionID),
|
||||
zap.Int64("segID", segIdx.SegmentID), zap.Int64("IndexBuildID", t.segmentIndex.BuildID))
|
||||
|
||||
err = t.WaitToFinish()
|
||||
@ -1267,12 +1265,12 @@ func (i *IndexCoord) watchFlushedSegmentLoop() {
|
||||
segmentInfo.ID = segID
|
||||
}
|
||||
|
||||
log.Debug("watchFlushedSegmentLoop watch event",
|
||||
log.Info("watchFlushedSegmentLoop watch event",
|
||||
zap.Int64("segID", segmentInfo.GetID()),
|
||||
zap.Any("isFake", segmentInfo.GetIsFake()))
|
||||
i.flushedSegmentWatcher.enqueueInternalTask(segmentInfo)
|
||||
case mvccpb.DELETE:
|
||||
log.Debug("the segment info has been deleted", zap.String("key", string(event.Kv.Key)))
|
||||
log.Info("the segment info has been deleted", zap.String("key", string(event.Kv.Key)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -338,17 +338,17 @@ func (mt *metaTable) NeedIndex(collID, indexID UniqueID) bool {
|
||||
|
||||
func (mt *metaTable) canIndex(segIdx *model.SegmentIndex) bool {
|
||||
if segIdx.IsDeleted {
|
||||
log.Debug("Index has been deleted", zap.Int64("buildID", segIdx.BuildID))
|
||||
log.Info("Index has been deleted", zap.Int64("buildID", segIdx.BuildID))
|
||||
return false
|
||||
}
|
||||
|
||||
if segIdx.NodeID != 0 {
|
||||
log.Debug("IndexCoord metaTable BuildIndex, but indexMeta's NodeID is not zero",
|
||||
log.Info("IndexCoord metaTable BuildIndex, but indexMeta's NodeID is not zero",
|
||||
zap.Int64("buildID", segIdx.BuildID), zap.Int64("nodeID", segIdx.NodeID))
|
||||
return false
|
||||
}
|
||||
if segIdx.IndexState != commonpb.IndexState_Unissued {
|
||||
log.Debug("IndexCoord metaTable BuildIndex, but indexMeta's state is not unissued",
|
||||
log.Info("IndexCoord metaTable BuildIndex, but indexMeta's state is not unissued",
|
||||
zap.Int64("buildID", segIdx.BuildID), zap.String("state", segIdx.IndexState.String()))
|
||||
return false
|
||||
}
|
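The canIndex checks in this hunk gate whether a segment index entry is eligible for building: not deleted, not yet assigned to a node, and still unissued. A compact sketch of that style of guard, with a simplified, hypothetical SegmentIndex struct standing in for the real model type:

package main

import "fmt"

// Hypothetical, simplified view of a segment index record.
type SegmentIndex struct {
	BuildID   int64
	NodeID    int64
	State     string // e.g. "Unissued", "InProgress", "Finished"
	IsDeleted bool
}

// canIndex mirrors the guard pattern: every disqualifying condition
// is checked explicitly and reported before returning false.
func canIndex(s SegmentIndex) bool {
	if s.IsDeleted {
		fmt.Println("index has been deleted, buildID:", s.BuildID)
		return false
	}
	if s.NodeID != 0 {
		fmt.Println("index already assigned to node:", s.NodeID)
		return false
	}
	if s.State != "Unissued" {
		fmt.Println("index state is not unissued:", s.State)
		return false
	}
	return true
}

func main() {
	fmt.Println(canIndex(SegmentIndex{BuildID: 1, State: "Unissued"})) // true
	fmt.Println(canIndex(SegmentIndex{BuildID: 2, NodeID: 7}))         // false
}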
||||
@ -361,7 +361,7 @@ func (mt *metaTable) UpdateVersion(buildID UniqueID, nodeID UniqueID) error {
|
||||
mt.segmentIndexLock.Lock()
|
||||
defer mt.segmentIndexLock.Unlock()
|
||||
|
||||
log.Debug("IndexCoord metaTable UpdateVersion receive", zap.Int64("buildID", buildID), zap.Int64("nodeID", nodeID))
|
||||
log.Info("IndexCoord metaTable UpdateVersion receive", zap.Int64("buildID", buildID), zap.Int64("nodeID", nodeID))
|
||||
segIdx, ok := mt.buildID2SegmentIndex[buildID]
|
||||
if !ok {
|
||||
return fmt.Errorf("there is no index with buildID: %d", buildID)
|
||||
@ -523,7 +523,7 @@ func (mt *metaTable) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID
|
||||
if !mt.checkParams(fieldIndex, req) {
|
||||
continue
|
||||
}
|
||||
log.Debug("IndexCoord has same index", zap.Int64("collectionID", req.CollectionID),
|
||||
log.Info("IndexCoord has same index", zap.Int64("collectionID", req.CollectionID),
|
||||
zap.Int64("fieldID", req.FieldID), zap.String("indexName", req.IndexName),
|
||||
zap.Int64("indexID", fieldIndex.IndexID))
|
||||
return true, fieldIndex.IndexID
|
||||
@ -787,7 +787,7 @@ func (mt *metaTable) MarkSegmentsIndexAsDeleted(selector func(index *model.Segme
|
||||
}
|
||||
|
||||
if len(segIdxes) == 0 {
|
||||
log.Debug("IndexCoord metaTable MarkSegmentsIndexAsDeleted success, no segment index need to mark")
|
||||
log.Info("IndexCoord metaTable MarkSegmentsIndexAsDeleted success, no segment index need to mark")
|
||||
return nil
|
||||
}
|
||||
err := mt.alterSegmentIndexes(segIdxes)
|
||||
@ -800,7 +800,7 @@ func (mt *metaTable) MarkSegmentsIndexAsDeleted(selector func(index *model.Segme
|
||||
func (mt *metaTable) GetSegmentIndexByBuildID(buildID UniqueID) (bool, *model.SegmentIndex) {
|
||||
mt.segmentIndexLock.RLock()
|
||||
defer mt.segmentIndexLock.RUnlock()
|
||||
log.Debug("IndexCoord get index file path from meta table", zap.Int64("buildID", buildID))
|
||||
log.Info("IndexCoord get index file path from meta table", zap.Int64("buildID", buildID))
|
||||
|
||||
segIdx, ok := mt.buildID2SegmentIndex[buildID]
|
||||
if !ok || segIdx.IsDeleted {
|
||||
@ -811,7 +811,7 @@ func (mt *metaTable) GetSegmentIndexByBuildID(buildID UniqueID) (bool, *model.Se
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Debug("IndexCoord get segment index file path success", zap.Int64("buildID", buildID),
|
||||
log.Info("IndexCoord get segment index file path success", zap.Int64("buildID", buildID),
|
||||
zap.Int("index files num", len(segIdx.IndexFileKeys)))
|
||||
return true, segIdx
|
||||
}
|
||||
@ -1059,7 +1059,7 @@ func (mt *metaTable) MarkSegmentsIndexAsDeletedByBuildID(buildIDs []UniqueID) er
|
||||
}
|
||||
}
|
||||
if len(segIdxes) == 0 {
|
||||
log.Debug("IndexCoord metaTable MarkSegmentsIndexAsDeletedByBuildID success, already have deleted",
|
||||
log.Info("IndexCoord metaTable MarkSegmentsIndexAsDeletedByBuildID success, already have deleted",
|
||||
zap.Int64s("buildIDs", buildIDs))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -63,14 +63,14 @@ func (nm *NodeManager) setClient(nodeID UniqueID, client types.IndexNode) {
|
||||
}
|
||||
nm.lock.Lock()
|
||||
nm.nodeClients[nodeID] = client
|
||||
log.Debug("IndexNode NodeManager setClient success", zap.Int64("nodeID", nodeID), zap.Int("IndexNode num", len(nm.nodeClients)))
|
||||
log.Info("IndexNode NodeManager setClient success", zap.Int64("nodeID", nodeID), zap.Int("IndexNode num", len(nm.nodeClients)))
|
||||
nm.lock.Unlock()
|
||||
nm.pq.Push(item)
|
||||
}
|
||||
|
||||
// RemoveNode removes the unused client of IndexNode.
|
||||
func (nm *NodeManager) RemoveNode(nodeID UniqueID) {
|
||||
log.Debug("IndexCoord", zap.Any("Remove node with ID", nodeID))
|
||||
log.Info("IndexCoord", zap.Any("Remove node with ID", nodeID))
|
||||
nm.lock.Lock()
|
||||
delete(nm.nodeClients, nodeID)
|
||||
nm.lock.Unlock()
|
||||
@ -81,7 +81,7 @@ func (nm *NodeManager) RemoveNode(nodeID UniqueID) {
|
||||
// AddNode adds the client of IndexNode.
|
||||
func (nm *NodeManager) AddNode(nodeID UniqueID, address string) error {
|
||||
|
||||
log.Debug("IndexCoord addNode", zap.Any("nodeID", nodeID), zap.Any("node address", address))
|
||||
log.Info("IndexCoord addNode", zap.Any("nodeID", nodeID), zap.Any("node address", address))
|
||||
if nm.pq.CheckExist(nodeID) {
|
||||
log.Warn("IndexCoord", zap.Any("Node client already exist with ID:", nodeID))
|
||||
return nil
|
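The NodeManager hunks above register and remove IndexNode clients under a lock and log the resulting client count. A minimal sketch of that registry shape; the type and field names here are illustrative, not the real milvus ones, and a string stands in for the gRPC client.

package main

import (
	"fmt"
	"sync"
)

// nodeRegistry keeps one client handle per node ID.
type nodeRegistry struct {
	mu      sync.Mutex
	clients map[int64]string
}

func newNodeRegistry() *nodeRegistry {
	return &nodeRegistry{clients: make(map[int64]string)}
}

// setClient stores (or replaces) the client for a node.
func (r *nodeRegistry) setClient(nodeID int64, client string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.clients[nodeID] = client
	fmt.Printf("setClient: nodeID=%d, total=%d\n", nodeID, len(r.clients))
}

// removeNode drops the client for a node, if any.
func (r *nodeRegistry) removeNode(nodeID int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.clients, nodeID)
	fmt.Printf("removeNode: nodeID=%d, total=%d\n", nodeID, len(r.clients))
}

func main() {
	r := newNodeRegistry()
	r.setClient(3, "client-for-3")
	r.removeNode(3)
}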
||||
|
||||
@ -171,7 +171,7 @@ func (cit *CreateIndexTask) Execute(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debug("IndexCoord get flushed segment from DataCoord success", zap.Int64("collectionID", cit.req.CollectionID),
|
||||
log.Info("IndexCoord get flushed segment from DataCoord success", zap.Int64("collectionID", cit.req.CollectionID),
|
||||
zap.Int64s("flushed segments", flushedSegments.Segments))
|
||||
segmentsInfo, err := cit.dataCoordClient.GetSegmentInfo(cit.ctx, &datapb.GetSegmentInfoRequest{
|
||||
SegmentIDs: flushedSegments.Segments,
|
||||
|
||||
@ -136,7 +136,7 @@ func (queue *BaseTaskQueue) PopActiveTask(tID UniqueID) task {
|
||||
delete(queue.activeTasks, tID)
|
||||
return t
|
||||
}
|
||||
log.Debug("indexcoord", zap.Int64("sorry, but the ID was not found in the active task list!", tID))
|
||||
log.Warn("failed to pop active task", zap.Int64("the task ID was not found in the active task list!", tID))
|
||||
return nil
|
||||
}
|
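Both task queues touched above keep an "active task" map and log when a pop misses. A stripped-down version of that Add/Pop pattern, assuming a trivial task type in place of the real task interface:

package main

import (
	"fmt"
	"sync"
)

type task struct{ name string }

type activeTaskQueue struct {
	mu    sync.Mutex
	tasks map[string]*task
}

func newActiveTaskQueue() *activeTaskQueue {
	return &activeTaskQueue{tasks: make(map[string]*task)}
}

func (q *activeTaskQueue) addActiveTask(t *task) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if _, ok := q.tasks[t.name]; ok {
		fmt.Println("task already in active task list:", t.name)
	}
	q.tasks[t.name] = t
}

// popActiveTask removes and returns the task, or nil when it is missing.
func (q *activeTaskQueue) popActiveTask(name string) *task {
	q.mu.Lock()
	defer q.mu.Unlock()
	if t, ok := q.tasks[name]; ok {
		delete(q.tasks, name)
		return t
	}
	fmt.Println("task was not found in the active task list:", name)
	return nil
}

func main() {
	q := newActiveTaskQueue()
	q.addActiveTask(&task{name: "build-42"})
	fmt.Println(q.popActiveTask("build-42") != nil) // true
	fmt.Println(q.popActiveTask("build-42") != nil) // false, logs a miss
}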
||||
|
||||
|
||||
@ -107,7 +107,7 @@ type IndexNode struct {
|
||||
|
||||
// NewIndexNode creates a new IndexNode component.
|
||||
func NewIndexNode(ctx context.Context, factory dependency.Factory) (*IndexNode, error) {
|
||||
log.Debug("New IndexNode ...")
|
||||
log.Info("New IndexNode ...")
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
b := &IndexNode{
|
||||
|
||||
@ -104,7 +104,7 @@ func (queue *IndexTaskQueue) AddActiveTask(t task) {
|
||||
tName := t.Name()
|
||||
_, ok := queue.activeTasks[tName]
|
||||
if ok {
|
||||
log.Debug("IndexNode task already in active task list", zap.Any("TaskID", tName))
|
||||
log.Info("IndexNode task already in active task list", zap.Any("TaskID", tName))
|
||||
}
|
||||
|
||||
queue.activeTasks[tName] = t
|
||||
@ -120,7 +120,7 @@ func (queue *IndexTaskQueue) PopActiveTask(tName string) task {
|
||||
delete(queue.activeTasks, tName)
|
||||
return t
|
||||
}
|
||||
log.Debug("IndexNode task was not found in the active task list", zap.String("TaskName", tName))
|
||||
log.Info("IndexNode task was not found in the active task list", zap.String("TaskName", tName))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -231,7 +231,7 @@ func (sched *TaskScheduler) processTask(t task, q TaskQueue) {
|
||||
}
|
||||
|
||||
func (sched *TaskScheduler) indexBuildLoop() {
|
||||
log.Debug("IndexNode TaskScheduler start build loop ...")
|
||||
log.Info("IndexNode TaskScheduler start build loop ...")
|
||||
defer sched.wg.Done()
|
||||
for {
|
||||
select {
|
||||
|
||||
@ -38,7 +38,7 @@ func (i *IndexNode) storeTaskState(ClusterID string, buildID UniqueID, state com
|
||||
i.stateLock.Lock()
|
||||
defer i.stateLock.Unlock()
|
||||
if task, ok := i.tasks[key]; ok {
|
||||
log.Debug("IndexNode store task state", zap.String("clusterID", ClusterID), zap.Int64("buildID", buildID),
|
||||
log.Info("IndexNode store task state", zap.String("clusterID", ClusterID), zap.Int64("buildID", buildID),
|
||||
zap.String("state", state.String()), zap.String("fail reason", failReason))
|
||||
task.state = state
|
||||
task.failReason = failReason
|
||||
|
||||
@ -115,7 +115,7 @@ func (ms *MetaSnapshot) loadTs() error {
|
||||
return nil
|
||||
}
|
||||
if curVer == version {
|
||||
log.Debug("Snapshot found save version with different revision", zap.Int64("revision", revision), zap.Int64("version", version))
|
||||
log.Info("Snapshot found save version with different revision", zap.Int64("revision", revision), zap.Int64("version", version))
|
||||
}
|
||||
strTs := string(resp.Kvs[0].Value)
|
||||
if strTs == "0" {
|
||||
@ -148,7 +148,7 @@ func (ms *MetaSnapshot) minTs() typeutil.Timestamp {
|
||||
}
|
||||
|
||||
func (ms *MetaSnapshot) initTs(rev int64, ts typeutil.Timestamp) {
|
||||
log.Debug("init meta Snapshot ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
|
||||
log.Info("init meta Snapshot ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
|
||||
if ms.numTs == 0 {
|
||||
ms.maxPos = len(ms.ts2Rev) - 1
|
||||
ms.minPos = len(ms.ts2Rev) - 1
|
||||
@ -164,7 +164,7 @@ func (ms *MetaSnapshot) initTs(rev int64, ts typeutil.Timestamp) {
|
||||
}
|
||||
|
||||
func (ms *MetaSnapshot) putTs(rev int64, ts typeutil.Timestamp) {
|
||||
log.Debug("put meta snapshto ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
|
||||
log.Info("put meta snapshto ts", zap.Int64("rev", rev), zap.Uint64("ts", ts))
|
||||
ms.maxPos++
|
||||
if ms.maxPos == len(ms.ts2Rev) {
|
||||
ms.maxPos = 0
|
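initTs and putTs above maintain a fixed-size ring of (revision, timestamp) pairs, wrapping maxPos back to zero when it reaches the end of the backing slice. A toy ring buffer showing the same wrap-around bookkeeping; the capacity and field names are made up for illustration.

package main

import "fmt"

type tsRev struct {
	rev int64
	ts  uint64
}

// tsRing is a fixed-capacity ring; maxPos always points at the most
// recently written slot and wraps to 0 at the end of the backing slice.
type tsRing struct {
	buf    []tsRev
	maxPos int
	count  int
}

func newTsRing(capacity int) *tsRing {
	return &tsRing{buf: make([]tsRev, capacity), maxPos: capacity - 1}
}

func (r *tsRing) put(rev int64, ts uint64) {
	r.maxPos++
	if r.maxPos == len(r.buf) {
		r.maxPos = 0 // wrap around, overwriting the oldest entry
	}
	r.buf[r.maxPos] = tsRev{rev: rev, ts: ts}
	if r.count < len(r.buf) {
		r.count++
	}
}

func main() {
	r := newTsRing(3)
	for i := 1; i <= 5; i++ {
		r.put(int64(i), uint64(i*10))
	}
	fmt.Println(r.buf) // the two oldest entries have been overwritten
}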
||||
@ -246,7 +246,7 @@ func (ms *MetaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 {
|
||||
for rev--; rev >= 2; rev-- {
|
||||
resp, err := ms.cli.Get(ctx, path.Join(ms.root, ms.tsKey), clientv3.WithRev(rev))
|
||||
if err != nil {
|
||||
log.Debug("get ts from etcd failed", zap.Error(err))
|
||||
log.Info("get ts from etcd failed", zap.Error(err))
|
||||
return 0
|
||||
}
|
||||
if len(resp.Kvs) <= 0 {
|
||||
@ -255,7 +255,7 @@ func (ms *MetaSnapshot) getRevOnEtcd(ts typeutil.Timestamp, rev int64) int64 {
|
||||
rev = resp.Kvs[0].ModRevision
|
||||
curTs, err := strconv.ParseUint(string(resp.Kvs[0].Value), 10, 64)
|
||||
if err != nil {
|
||||
log.Debug("parse timestam error", zap.String("input", string(resp.Kvs[0].Value)), zap.Error(err))
|
||||
log.Info("parse timestam error", zap.String("input", string(resp.Kvs[0].Value)), zap.Error(err))
|
||||
return 0
|
||||
}
|
||||
if curTs <= ts {
|
||||
|
||||
@ -79,7 +79,7 @@ func (c *client) Subscribe(options ConsumerOptions) (Consumer, error) {
|
||||
return nil, err
|
||||
}
|
||||
if exist {
|
||||
log.Debug("ConsumerGroup already existed", zap.Any("topic", options.Topic), zap.Any("SubscriptionName", options.SubscriptionName))
|
||||
log.Info("ConsumerGroup already existed", zap.Any("topic", options.Topic), zap.Any("SubscriptionName", options.SubscriptionName))
|
||||
consumer, err := getExistedConsumer(c, options, con.MsgMutex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -138,7 +138,7 @@ func (c *client) consume(consumer *consumer) {
|
||||
case _, ok := <-consumer.MsgMutex():
|
||||
if !ok {
|
||||
// consumer MsgMutex closed, goroutine exit
|
||||
log.Debug("Consumer MsgMutex closed")
|
||||
log.Info("Consumer MsgMutex closed")
|
||||
return
|
||||
}
|
||||
c.deliver(consumer, 100)
|
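The consume loop above blocks on a per-consumer notification channel (MsgMutex) and exits when that channel is closed. The sketch below reproduces that select/closed-channel pattern with plain channels; the names and the batch size are invented for illustration.

package main

import (
	"fmt"
	"time"
)

// consumeLoop waits for "data available" signals and drains a batch for
// each one; a closed signal channel means the consumer is shutting down.
func consumeLoop(done <-chan struct{}, msgMutex <-chan struct{}, deliver func(int)) {
	for {
		select {
		case <-done:
			return
		case _, ok := <-msgMutex:
			if !ok {
				fmt.Println("consumer MsgMutex closed")
				return
			}
			deliver(100) // drain up to 100 messages per signal
		}
	}
}

func main() {
	done := make(chan struct{})
	sig := make(chan struct{}, 1)
	go consumeLoop(done, sig, func(n int) { fmt.Println("deliver batch of", n) })
	sig <- struct{}{}
	time.Sleep(50 * time.Millisecond)
	close(sig) // loop logs the closed channel and exits
	time.Sleep(50 * time.Millisecond)
}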
||||
|
||||
@ -52,7 +52,7 @@ func InitRocksMQ(path string) error {
|
||||
var finalErr error
|
||||
once.Do(func() {
|
||||
params.Init()
|
||||
log.Debug("initializing global rmq", zap.String("path", path))
|
||||
log.Info("initializing global rmq", zap.String("path", path))
|
||||
var fi os.FileInfo
|
||||
fi, finalErr = os.Stat(path)
|
||||
if os.IsNotExist(finalErr) {
|
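InitRocksMQ above does its whole setup inside once.Do and reports failure through a captured variable, so repeated calls are cheap and all return the first error. A minimal version of that pattern under stated assumptions (the path check is the only "work" done here):

package main

import (
	"fmt"
	"os"
	"sync"
)

var (
	initOnce sync.Once
	initErr  error
)

// initStore performs one-time initialization; later calls return the
// error (if any) recorded by the first call.
func initStore(path string) error {
	initOnce.Do(func() {
		fmt.Println("initializing store at", path)
		if _, err := os.Stat(path); os.IsNotExist(err) {
			initErr = os.MkdirAll(path, 0o755)
		}
	})
	return initErr
}

func main() {
	fmt.Println(initStore(os.TempDir() + "/rmq-sketch"))
	fmt.Println(initStore(os.TempDir() + "/rmq-sketch")) // no-op, same result
}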
||||
@ -85,7 +85,7 @@ func InitRocksMQ(path string) error {
|
||||
|
||||
// CloseRocksMQ is used to close global rocksmq
|
||||
func CloseRocksMQ() {
|
||||
log.Debug("Close Rocksmq!")
|
||||
log.Info("Close Rocksmq!")
|
||||
if Rmq != nil && Rmq.store != nil {
|
||||
Rmq.Close()
|
||||
}
|
||||
|
||||
@ -151,7 +151,7 @@ func NewRocksMQ(params paramtable.BaseTable, name string, idAllocator allocator.
|
||||
rocksDBLRUCacheCapacity = calculatedCapacity
|
||||
}
|
||||
}
|
||||
log.Debug("Start rocksmq ", zap.Int("max proc", maxProcs),
|
||||
log.Info("Start rocksmq ", zap.Int("max proc", maxProcs),
|
||||
zap.Int("parallism", parallelism), zap.Uint64("lru cache", rocksDBLRUCacheCapacity))
|
||||
bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
|
||||
bbto.SetBlockCache(gorocksdb.NewLRUCache(rocksDBLRUCacheCapacity))
|
||||
@ -274,7 +274,7 @@ func (rmq *rocksmq) Close() {
|
||||
log.Info("Successfully close rocksmq")
|
||||
}
|
||||
|
||||
//print rmq consumer Info
|
||||
// print rmq consumer Info
|
||||
func (rmq *rocksmq) Info() bool {
|
||||
rtn := true
|
||||
rmq.consumers.Range(func(key, vals interface{}) bool {
|
||||
@ -377,7 +377,7 @@ func (rmq *rocksmq) CreateTopic(topicName string) error {
|
||||
rmq.retentionInfo.mutex.Lock()
|
||||
defer rmq.retentionInfo.mutex.Unlock()
|
||||
rmq.retentionInfo.topicRetetionTime.Store(topicName, time.Now().Unix())
|
||||
log.Debug("Rocksmq create topic successfully ", zap.String("topic", topicName), zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
log.Info("Rocksmq create topic successfully ", zap.String("topic", topicName), zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -441,7 +441,7 @@ func (rmq *rocksmq) DestroyTopic(topicName string) error {
|
||||
topicMu.Delete(topicName)
|
||||
rmq.retentionInfo.topicRetetionTime.Delete(topicName)
|
||||
|
||||
log.Debug("Rocksmq destroy topic successfully ", zap.String("topic", topicName), zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
log.Info("Rocksmq destroy topic successfully ", zap.String("topic", topicName), zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -473,7 +473,7 @@ func (rmq *rocksmq) CreateConsumerGroup(topicName, groupName string) error {
|
||||
return fmt.Errorf("RMQ CreateConsumerGroup key already exists, key = %s", key)
|
||||
}
|
||||
rmq.consumersID.Store(key, DefaultMessageID)
|
||||
log.Debug("Rocksmq create consumer group successfully ", zap.String("topic", topicName),
|
||||
log.Info("Rocksmq create consumer group successfully ", zap.String("topic", topicName),
|
||||
zap.String("group", groupName),
|
||||
zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
@ -499,7 +499,7 @@ func (rmq *rocksmq) RegisterConsumer(consumer *Consumer) error {
|
||||
consumers[0] = consumer
|
||||
rmq.consumers.Store(consumer.Topic, consumers)
|
||||
}
|
||||
log.Debug("Rocksmq register consumer successfully ", zap.String("topic", consumer.Topic), zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
log.Info("Rocksmq register consumer successfully ", zap.String("topic", consumer.Topic), zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -549,7 +549,7 @@ func (rmq *rocksmq) destroyConsumerGroupInternal(topicName, groupName string) er
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Debug("Rocksmq destroy consumer group successfully ", zap.String("topic", topicName),
|
||||
log.Info("Rocksmq destroy consumer group successfully ", zap.String("topic", topicName),
|
||||
zap.String("group", groupName),
|
||||
zap.Int64("elapsed", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
@ -846,11 +846,11 @@ func (rmq *rocksmq) Seek(topicName string, groupName string, msgID UniqueID) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("successfully seek", zap.String("topic", topicName), zap.String("group", groupName), zap.Uint64("msgId", uint64(msgID)))
|
||||
log.Info("successfully seek", zap.String("topic", topicName), zap.String("group", groupName), zap.Uint64("msgId", uint64(msgID)))
|
||||
return nil
|
||||
}
|
||||
|
||||
//Only for test
|
||||
// Only for test
|
||||
func (rmq *rocksmq) ForceSeek(topicName string, groupName string, msgID UniqueID) error {
|
||||
log.Warn("Use method ForceSeek that only for test")
|
||||
if rmq.isClosed() {
|
||||
@ -878,7 +878,7 @@ func (rmq *rocksmq) ForceSeek(topicName string, groupName string, msgID UniqueID
|
||||
|
||||
rmq.consumersID.Store(key, msgID)
|
||||
|
||||
log.Debug("successfully force seek", zap.String("topic", topicName),
|
||||
log.Info("successfully force seek", zap.String("topic", topicName),
|
||||
zap.String("group", groupName), zap.Uint64("msgID", uint64(msgID)))
|
||||
return nil
|
||||
}
|
||||
@ -908,7 +908,7 @@ func (rmq *rocksmq) SeekToLatest(topicName, groupName string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debug("successfully seek to latest", zap.String("topic", topicName),
|
||||
log.Info("successfully seek to latest", zap.String("topic", topicName),
|
||||
zap.String("group", groupName), zap.Uint64("latest", uint64(msgID+1)))
|
||||
return nil
|
||||
}
|
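Seek, ForceSeek and SeekToLatest above all end by storing the consumer's position keyed by topic and group. A compact sketch of that bookkeeping using sync.Map; the key format and type names are invented for illustration.

package main

import (
	"fmt"
	"sync"
)

type positionStore struct {
	consumersID sync.Map // key "topic/group" -> last consumed message ID
}

func key(topic, group string) string { return topic + "/" + group }

// seek records the position a consumer group should resume from.
func (p *positionStore) seek(topic, group string, msgID int64) {
	p.consumersID.Store(key(topic, group), msgID)
	fmt.Printf("seek: topic=%s group=%s msgID=%d\n", topic, group, msgID)
}

func (p *positionStore) current(topic, group string) (int64, bool) {
	v, ok := p.consumersID.Load(key(topic, group))
	if !ok {
		return 0, false
	}
	return v.(int64), true
}

func main() {
	var p positionStore
	p.seek("topic-a", "group-1", 42)
	fmt.Println(p.current("topic-a", "group-1"))
}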
||||
|
||||
@ -509,15 +509,17 @@ func TestRocksmq_Goroutines(t *testing.T) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
/**
This test is aim to measure RocksMq throughout.
Hardware:
CPU Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
Disk SSD
/*
*

This test is aim to measure RocksMq throughout.
Hardware:

CPU Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz
Disk SSD

Test with 1,000,000 message, result is as follow:
Produce: 190000 message / s
Consume: 90000 message / s
*/
|
||||
func TestRocksmq_Throughout(t *testing.T) {
|
||||
ep := etcdEndpoints()
|
||||
|
||||
@ -96,7 +96,7 @@ func (ri *retentionInfo) startRetentionInfo() {
|
||||
|
||||
// retention do time ticker and trigger retention check and operation for each topic
|
||||
func (ri *retentionInfo) retention() error {
|
||||
log.Debug("Rocksmq retention goroutine start!")
|
||||
log.Info("Rocksmq retention goroutine start!")
|
||||
// Do retention check every 10 mins
|
||||
ticker := time.NewTicker(time.Duration(atomic.LoadInt64(&TickerTimeInSeconds) * int64(time.Second)))
|
||||
defer ticker.Stop()
|
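The retention goroutine above wakes on a ticker whose period is read atomically and then walks every topic. A skeleton of that loop; the check function and the interval variable are placeholders, and the one-second period is only for the demo.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var tickerSeconds int64 = 1 // would be on the order of minutes in a real retention loop

// retention runs checkTopics on every tick until done is closed.
func retention(done <-chan struct{}, checkTopics func()) {
	interval := time.Duration(atomic.LoadInt64(&tickerSeconds)) * time.Second
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			checkTopics()
		}
	}
}

func main() {
	done := make(chan struct{})
	go retention(done, func() { fmt.Println("retention check at", time.Now().Format(time.RFC3339)) })
	time.Sleep(2500 * time.Millisecond)
	close(done)
}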
||||
@ -166,7 +166,7 @@ func (ri *retentionInfo) expiredCleanUp(topic string) error {
|
||||
}
|
||||
// Quick Path, No page to check
|
||||
if totalAckedSize == 0 {
|
||||
log.Debug("All messages are not expired, skip retention because no ack", zap.Any("topic", topic),
|
||||
log.Info("All messages are not expired, skip retention because no ack", zap.Any("topic", topic),
|
||||
zap.Any("time taken", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
@ -219,7 +219,7 @@ func (ri *retentionInfo) expiredCleanUp(topic string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debug("Expired check by retention time", zap.Any("topic", topic),
|
||||
log.Info("Expired check by retention time", zap.Any("topic", topic),
|
||||
zap.Any("pageEndID", pageEndID), zap.Any("deletedAckedSize", deletedAckedSize),
|
||||
zap.Any("pageCleaned", pageCleaned), zap.Any("time taken", time.Since(start).Milliseconds()))
|
||||
|
||||
@ -254,11 +254,11 @@ func (ri *retentionInfo) expiredCleanUp(topic string) error {
|
||||
}
|
||||
|
||||
if pageEndID == 0 {
|
||||
log.Debug("All messages are not expired, skip retention", zap.Any("topic", topic), zap.Any("time taken", time.Since(start).Milliseconds()))
|
||||
log.Info("All messages are not expired, skip retention", zap.Any("topic", topic), zap.Any("time taken", time.Since(start).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
expireTime := time.Since(start).Milliseconds()
|
||||
log.Debug("Expired check by message size: ", zap.Any("topic", topic),
|
||||
log.Info("Expired check by message size: ", zap.Any("topic", topic),
|
||||
zap.Any("pageEndID", pageEndID), zap.Any("deletedAckedSize", deletedAckedSize),
|
||||
zap.Any("pageCleaned", pageCleaned), zap.Any("time taken", expireTime))
|
||||
return ri.cleanData(topic, pageEndID)
|
||||
|
||||
@ -795,7 +795,7 @@ func (ms *MqTtMsgStream) consumeToTtMsg(consumer mqwrapper.Consumer) {
|
||||
return
|
||||
case msg, ok := <-consumer.Chan():
|
||||
if !ok {
|
||||
log.Debug("consumer closed!")
|
||||
log.Info("consumer closed!")
|
||||
return
|
||||
}
|
||||
consumer.Ack(msg)
|
||||
|
||||
@ -1028,23 +1028,22 @@ func sendMsgPacks(ms MsgStream, msgPacks []*MsgPack) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//
// This testcase will generate MsgPacks as following:
//
//   Insert     Insert     Insert     Insert     Insert     Insert
//   c1 |----------|----------|----------|----------|----------|----------|
//           ^          ^          ^          ^          ^          ^
//         TT(10)     TT(20)     TT(30)     TT(40)     TT(50)     TT(100)
//
//   Insert     Insert     Insert     Insert     Insert     Insert
//   c2 |----------|----------|----------|----------|----------|----------|
//           ^          ^          ^          ^          ^          ^
//         TT(10)     TT(20)     TT(30)     TT(40)     TT(50)     TT(100)
//
// Then check:
// 1. For each msg in MsgPack received by ttMsgStream consumer, there should be
//    msgPack.BeginTs < msg.BeginTs() <= msgPack.EndTs
// 2. The count of consumed msg should be equal to the count of produced msg
|
||||
func TestStream_PulsarTtMsgStream_1(t *testing.T) {
|
||||
pulsarAddr := getPulsarAddress()
|
||||
c1 := funcutil.RandomString(8)
|
||||
@ -1092,22 +1091,25 @@ func TestStream_PulsarTtMsgStream_1(t *testing.T) {
|
||||
outputStream.Close()
|
||||
}
|
||||
|
||||
//
// This testcase will generate MsgPacks as following:
//
//   Insert     Insert     Insert     Insert     Insert     Insert
//   c1 |----------|----------|----------|----------|----------|----------|
//           ^          ^          ^          ^          ^          ^
//         TT(10)     TT(20)     TT(30)     TT(40)     TT(50)     TT(100)
//
//   Insert     Insert     Insert     Insert     Insert     Insert
//   c2 |----------|----------|----------|----------|----------|----------|
//           ^          ^          ^          ^          ^          ^
//         TT(10)     TT(20)     TT(30)     TT(40)     TT(50)     TT(100)
//
// Then check:
// 1. ttMsgStream consumer can seek to the right position and resume
// 2. The count of consumed msg should be equal to the count of produced msg
|
||||
func TestStream_PulsarTtMsgStream_2(t *testing.T) {
|
||||
pulsarAddr := getPulsarAddress()
|
||||
c1 := funcutil.RandomString(8)
|
||||
|
||||
@ -100,7 +100,7 @@ func (kc *kafkaClient) getKafkaProducer() (*kafka.Producer, error) {
|
||||
panic(ev)
|
||||
}
|
||||
default:
|
||||
log.Debug("kafka producer event", zap.Any("event", ev))
|
||||
log.Info("kafka producer event", zap.Any("event", ev))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@ -59,7 +59,7 @@ func (kp *kafkaProducer) Close() {
|
||||
|
||||
cost := time.Since(start).Milliseconds()
|
||||
if cost > 500 {
|
||||
log.Debug("kafka producer is closed", zap.Any("topic", kp.topic), zap.Int64("time cost(ms)", cost))
|
||||
log.Info("kafka producer is closed", zap.Any("topic", kp.topic), zap.Int64("time cost(ms)", cost))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@ -72,7 +72,7 @@ func (pc *Consumer) Chan() <-chan mqwrapper.Message {
|
||||
select {
|
||||
case msg, ok := <-pc.c.Chan():
|
||||
if !ok {
|
||||
log.Debug("pulsar consumer channel closed")
|
||||
log.Info("pulsar consumer channel closed")
|
||||
return
|
||||
}
|
||||
if !pc.skip {
|
||||
|
||||
@ -170,7 +170,7 @@ func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.Creat
|
||||
// avoid data race
|
||||
lenOfSchema := len(request.Schema)
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -199,7 +199,7 @@ func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.Creat
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -235,7 +235,7 @@ func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.Creat
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -275,7 +275,7 @@ func (node *Proxy) DropCollection(ctx context.Context, request *milvuspb.DropCol
|
||||
chTicker: node.chTicker,
|
||||
}
|
||||
|
||||
log.Debug("DropCollection received",
|
||||
log.Info("DropCollection received",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("db", request.DbName),
|
||||
@ -296,7 +296,7 @@ func (node *Proxy) DropCollection(ctx context.Context, request *milvuspb.DropCol
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("DropCollection enqueued",
|
||||
log.Info("DropCollection enqueued",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", dct.ID()),
|
||||
@ -323,7 +323,7 @@ func (node *Proxy) DropCollection(ctx context.Context, request *milvuspb.DropCol
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("DropCollection done",
|
||||
log.Info("DropCollection done",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", dct.ID()),
|
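Each Proxy RPC changed in these hunks follows the same shape: log "received", enqueue a task, log "enqueued", wait for it, then log "done" (all now at Info level). The sketch below shows that three-stage lifecycle around a generic task; the helper names echo the rpcReceived/rpcEnqueued/rpcDone wording, but the queue and task types are invented, not the Proxy's real scheduler.

package main

import (
	"errors"
	"fmt"
	"log"
)

type queuedTask struct {
	run  func() error
	done chan error
}

func enqueue(q chan<- *queuedTask, t *queuedTask) error {
	select {
	case q <- t:
		return nil
	default:
		return errors.New("task queue is full")
	}
}

// handle walks a request through received -> enqueued -> done logging.
func handle(method string, q chan<- *queuedTask, run func() error) error {
	log.Printf("%s received", method)
	t := &queuedTask{run: run, done: make(chan error, 1)}
	if err := enqueue(q, t); err != nil {
		log.Printf("%s failed to enqueue: %v", method, err)
		return err
	}
	log.Printf("%s enqueued", method)
	err := <-t.done // WaitToFinish
	log.Printf("%s done, err=%v", method, err)
	return err
}

func main() {
	q := make(chan *queuedTask, 1)
	go func() { // a single-worker scheduler
		for t := range q {
			t.done <- t.run()
		}
	}()
	fmt.Println(handle("DropCollection", q, func() error { return nil }))
}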
||||
@ -353,7 +353,7 @@ func (node *Proxy) HasCollection(ctx context.Context, request *milvuspb.HasColle
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug("HasCollection received",
|
||||
log.Info("HasCollection received",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("db", request.DbName),
|
||||
@ -384,7 +384,7 @@ func (node *Proxy) HasCollection(ctx context.Context, request *milvuspb.HasColle
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("HasCollection enqueued",
|
||||
log.Info("HasCollection enqueued",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", hct.ID()),
|
||||
@ -414,7 +414,7 @@ func (node *Proxy) HasCollection(ctx context.Context, request *milvuspb.HasColle
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("HasCollection done",
|
||||
log.Info("HasCollection done",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", hct.ID()),
|
||||
@ -450,7 +450,7 @@ func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCol
|
||||
indexCoord: node.indexCoord,
|
||||
}
|
||||
|
||||
log.Debug("LoadCollection received",
|
||||
log.Info("LoadCollection received",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("db", request.DbName),
|
||||
@ -472,7 +472,7 @@ func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCol
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("LoadCollection enqueued",
|
||||
log.Info("LoadCollection enqueued",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", lct.ID()),
|
||||
@ -499,7 +499,7 @@ func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCol
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("LoadCollection done",
|
||||
log.Info("LoadCollection done",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", lct.ID()),
|
||||
@ -535,7 +535,7 @@ func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.Rele
|
||||
chMgr: node.chMgr,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -559,7 +559,7 @@ func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.Rele
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -589,7 +589,7 @@ func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.Rele
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -628,7 +628,7 @@ func (node *Proxy) DescribeCollection(ctx context.Context, request *milvuspb.Des
|
||||
rootCoord: node.rootCoord,
|
||||
}
|
||||
|
||||
log.Debug("DescribeCollection received",
|
||||
log.Info("DescribeCollection received",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("db", request.DbName),
|
||||
@ -652,7 +652,7 @@ func (node *Proxy) DescribeCollection(ctx context.Context, request *milvuspb.Des
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("DescribeCollection enqueued",
|
||||
log.Info("DescribeCollection enqueued",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", dct.ID()),
|
||||
@ -683,7 +683,7 @@ func (node *Proxy) DescribeCollection(ctx context.Context, request *milvuspb.Des
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("DescribeCollection done",
|
||||
log.Info("DescribeCollection done",
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", dct.ID()),
|
||||
@ -826,7 +826,7 @@ func (node *Proxy) GetCollectionStatistics(ctx context.Context, request *milvusp
|
||||
dataCoord: node.dataCoord,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -853,7 +853,7 @@ func (node *Proxy) GetCollectionStatistics(ctx context.Context, request *milvusp
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -886,7 +886,7 @@ func (node *Proxy) GetCollectionStatistics(ctx context.Context, request *milvusp
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -921,7 +921,7 @@ func (node *Proxy) ShowCollections(ctx context.Context, request *milvuspb.ShowCo
|
||||
rootCoord: node.rootCoord,
|
||||
}
|
||||
|
||||
log.Debug("ShowCollections received",
|
||||
log.Info("ShowCollections received",
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("DbName", request.DbName),
|
||||
zap.Uint64("TimeStamp", request.TimeStamp),
|
||||
@ -949,7 +949,7 @@ func (node *Proxy) ShowCollections(ctx context.Context, request *milvuspb.ShowCo
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("ShowCollections enqueued",
|
||||
log.Info("ShowCollections enqueued",
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", sct.ID()),
|
||||
zap.String("DbName", sct.ShowCollectionsRequest.DbName),
|
||||
@ -980,7 +980,7 @@ func (node *Proxy) ShowCollections(ctx context.Context, request *milvuspb.ShowCo
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug("ShowCollections Done",
|
||||
log.Info("ShowCollections Done",
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.Int64("MsgID", sct.ID()),
|
||||
zap.String("DbName", request.DbName),
|
||||
@ -1014,7 +1014,7 @@ func (node *Proxy) AlterCollection(ctx context.Context, request *milvuspb.AlterC
|
||||
rootCoord: node.rootCoord,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1037,7 +1037,7 @@ func (node *Proxy) AlterCollection(ctx context.Context, request *milvuspb.AlterC
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1067,7 +1067,7 @@ func (node *Proxy) AlterCollection(ctx context.Context, request *milvuspb.AlterC
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1103,7 +1103,7 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
|
||||
result: nil,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived("CreatePartition"),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1129,7 +1129,7 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued("CreatePartition"),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1161,7 +1161,7 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone("CreatePartition"),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1199,7 +1199,7 @@ func (node *Proxy) DropPartition(ctx context.Context, request *milvuspb.DropPart
|
||||
result: nil,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1225,7 +1225,7 @@ func (node *Proxy) DropPartition(ctx context.Context, request *milvuspb.DropPart
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1257,7 +1257,7 @@ func (node *Proxy) DropPartition(ctx context.Context, request *milvuspb.DropPart
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1298,7 +1298,7 @@ func (node *Proxy) HasPartition(ctx context.Context, request *milvuspb.HasPartit
|
||||
result: nil,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1328,7 +1328,7 @@ func (node *Proxy) HasPartition(ctx context.Context, request *milvuspb.HasPartit
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1364,7 +1364,7 @@ func (node *Proxy) HasPartition(ctx context.Context, request *milvuspb.HasPartit
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1402,7 +1402,7 @@ func (node *Proxy) LoadPartitions(ctx context.Context, request *milvuspb.LoadPar
|
||||
indexCoord: node.indexCoord,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1429,7 +1429,7 @@ func (node *Proxy) LoadPartitions(ctx context.Context, request *milvuspb.LoadPar
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1462,7 +1462,7 @@ func (node *Proxy) LoadPartitions(ctx context.Context, request *milvuspb.LoadPar
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1500,7 +1500,7 @@ func (node *Proxy) ReleasePartitions(ctx context.Context, request *milvuspb.Rele
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1527,7 +1527,7 @@ func (node *Proxy) ReleasePartitions(ctx context.Context, request *milvuspb.Rele
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1560,7 +1560,7 @@ func (node *Proxy) ReleasePartitions(ctx context.Context, request *milvuspb.Rele
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1600,7 +1600,7 @@ func (node *Proxy) GetPartitionStatistics(ctx context.Context, request *milvuspb
|
||||
dataCoord: node.dataCoord,
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1629,7 +1629,7 @@ func (node *Proxy) GetPartitionStatistics(ctx context.Context, request *milvuspb
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1664,7 +1664,7 @@ func (node *Proxy) GetPartitionStatistics(ctx context.Context, request *milvuspb
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1708,7 +1708,7 @@ func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPar
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1733,7 +1733,7 @@ func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPar
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1768,7 +1768,7 @@ func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPar
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1850,7 +1850,7 @@ func (node *Proxy) GetLoadingProgress(ctx context.Context, request *milvuspb.Get
|
||||
defer sp.Finish()
|
||||
traceID, _, _ := trace.InfoFromSpan(sp)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.TotalLabel).Inc()
|
||||
logger.Debug(
|
||||
logger.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.Any("request", request))
|
||||
@ -1898,7 +1898,7 @@ func (node *Proxy) GetLoadingProgress(ctx context.Context, request *milvuspb.Get
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debug(
|
||||
logger.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.Any("request", request))
|
||||
@ -1935,7 +1935,7 @@ func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateInde
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1964,7 +1964,7 @@ func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateInde
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -1999,7 +1999,7 @@ func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateInde
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2042,7 +2042,7 @@ func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.Describe
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2073,7 +2073,7 @@ func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.Describe
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2114,7 +2114,7 @@ func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.Describe
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2155,7 +2155,7 @@ func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexReq
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2183,7 +2183,7 @@ func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexReq
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2218,7 +2218,7 @@ func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexReq
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2263,7 +2263,7 @@ func (node *Proxy) GetIndexBuildProgress(ctx context.Context, request *milvuspb.
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2293,7 +2293,7 @@ func (node *Proxy) GetIndexBuildProgress(ctx context.Context, request *milvuspb.
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2329,7 +2329,7 @@ func (node *Proxy) GetIndexBuildProgress(ctx context.Context, request *milvuspb.
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2373,7 +2373,7 @@ func (node *Proxy) GetIndexState(ctx context.Context, request *milvuspb.GetIndex
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.TotalLabel).Inc()
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2404,7 +2404,7 @@ func (node *Proxy) GetIndexState(ctx context.Context, request *milvuspb.GetIndex
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2440,7 +2440,7 @@ func (node *Proxy) GetIndexState(ctx context.Context, request *milvuspb.GetIndex
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2865,7 +2865,7 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2887,7 +2887,7 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -2916,7 +2916,7 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3080,7 +3080,7 @@ func (node *Proxy) CreateAlias(ctx context.Context, request *milvuspb.CreateAlia
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3106,7 +3106,7 @@ func (node *Proxy) CreateAlias(ctx context.Context, request *milvuspb.CreateAlia
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3137,7 +3137,7 @@ func (node *Proxy) CreateAlias(ctx context.Context, request *milvuspb.CreateAlia
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3174,7 +3174,7 @@ func (node *Proxy) DropAlias(ctx context.Context, request *milvuspb.DropAliasReq
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3197,7 +3197,7 @@ func (node *Proxy) DropAlias(ctx context.Context, request *milvuspb.DropAliasReq
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3227,7 +3227,7 @@ func (node *Proxy) DropAlias(ctx context.Context, request *milvuspb.DropAliasReq
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3263,7 +3263,7 @@ func (node *Proxy) AlterAlias(ctx context.Context, request *milvuspb.AlterAliasR
|
||||
tr := timerecord.NewTimeRecorder(method)
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.TotalLabel).Inc()
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcReceived(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3288,7 +3288,7 @@ func (node *Proxy) AlterAlias(ctx context.Context, request *milvuspb.AlterAliasR
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcEnqueued(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3320,7 +3320,7 @@ func (node *Proxy) AlterAlias(ctx context.Context, request *milvuspb.AlterAliasR
|
||||
}, nil
|
||||
}
|
||||
|
||||
log.Debug(
|
||||
log.Info(
|
||||
rpcDone(method),
|
||||
zap.String("traceID", traceID),
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
@ -3432,7 +3432,7 @@ func (node *Proxy) GetDdChannel(ctx context.Context, request *internalpb.GetDdCh
|
||||
|
||||
// GetPersistentSegmentInfo get the information of sealed segment.
|
||||
func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.GetPersistentSegmentInfoRequest) (*milvuspb.GetPersistentSegmentInfoResponse, error) {
|
||||
log.Debug("GetPersistentSegmentInfo",
|
||||
log.Info("GetPersistentSegmentInfo",
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("db", req.DbName),
|
||||
zap.Any("collection", req.CollectionName))
|
||||
@ -3488,7 +3488,7 @@ func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.G
|
||||
resp.Status.Reason = fmt.Errorf("dataCoord:GetSegmentInfo, err:%w", err).Error()
|
||||
return resp, nil
|
||||
}
|
||||
log.Debug("GetPersistentSegmentInfo ", zap.Int("len(infos)", len(infoResp.Infos)), zap.Any("status", infoResp.Status))
|
||||
log.Info("GetPersistentSegmentInfo ", zap.Int("len(infos)", len(infoResp.Infos)), zap.Any("status", infoResp.Status))
|
||||
if infoResp.Status.ErrorCode != commonpb.ErrorCode_Success {
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method,
|
||||
metrics.FailLabel).Inc()
|
||||
@ -3515,7 +3515,7 @@ func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.G
|
||||
|
||||
// GetQuerySegmentInfo gets segment information from QueryCoord.
|
||||
func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQuerySegmentInfoRequest) (*milvuspb.GetQuerySegmentInfoResponse, error) {
|
||||
log.Debug("GetQuerySegmentInfo",
|
||||
log.Info("GetQuerySegmentInfo",
|
||||
zap.String("role", typeutil.ProxyRole),
|
||||
zap.String("db", req.DbName),
|
||||
zap.Any("collection", req.CollectionName))
|
||||
@ -3556,7 +3556,7 @@ func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQue
|
||||
resp.Status.Reason = err.Error()
|
||||
return resp, nil
|
||||
}
|
||||
log.Debug("GetQuerySegmentInfo ", zap.Any("infos", infoResp.Infos), zap.Any("status", infoResp.Status))
|
||||
log.Info("GetQuerySegmentInfo ", zap.Any("infos", infoResp.Infos), zap.Any("status", infoResp.Status))
|
||||
if infoResp.Status.ErrorCode != commonpb.ErrorCode_Success {
|
||||
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.FailLabel).Inc()
|
||||
log.Error("Failed to get segment info from QueryCoord", zap.String("errMsg", infoResp.Status.Reason))
|
||||
@ -3814,7 +3814,7 @@ func (node *Proxy) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetrics

// LoadBalance would do a load balancing operation between query nodes
func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceRequest) (*commonpb.Status, error) {
log.Debug("Proxy.LoadBalance",
log.Info("Proxy.LoadBalance",
zap.Int64("proxy_id", Params.ProxyCfg.GetNodeID()),
zap.Any("req", req))

@ -3856,14 +3856,14 @@ func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceReq
status.Reason = infoResp.Reason
return status, nil
}
log.Debug("LoadBalance Done", zap.Any("req", req), zap.Any("status", infoResp))
log.Info("LoadBalance Done", zap.Any("req", req), zap.Any("status", infoResp))
status.ErrorCode = commonpb.ErrorCode_Success
return status, nil
}

// GetReplicas gets replica info
func (node *Proxy) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasRequest) (*milvuspb.GetReplicasResponse, error) {
log.Debug("received get replicas request", zap.Int64("collection", req.GetCollectionID()), zap.Bool("with shard nodes", req.GetWithShardNodes()))
log.Info("received get replicas request", zap.Int64("collection", req.GetCollectionID()), zap.Bool("with shard nodes", req.GetWithShardNodes()))
resp := &milvuspb.GetReplicasResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
@ -3882,13 +3882,13 @@ func (node *Proxy) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReq
resp.Status.Reason = err.Error()
return resp, nil
}
log.Debug("received get replicas response", zap.Any("resp", resp), zap.Error(err))
log.Info("received get replicas response", zap.Any("resp", resp), zap.Error(err))
return resp, nil
}

// GetCompactionState gets the compaction state of multiple segments
func (node *Proxy) GetCompactionState(ctx context.Context, req *milvuspb.GetCompactionStateRequest) (*milvuspb.GetCompactionStateResponse, error) {
log.Debug("received GetCompactionState request", zap.Int64("compactionID", req.GetCompactionID()))
log.Info("received GetCompactionState request", zap.Int64("compactionID", req.GetCompactionID()))
resp := &milvuspb.GetCompactionStateResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
@ -3896,7 +3896,7 @@ func (node *Proxy) GetCompactionState(ctx context.Context, req *milvuspb.GetComp
}

resp, err := node.dataCoord.GetCompactionState(ctx, req)
log.Debug("received GetCompactionState response", zap.Int64("compactionID", req.GetCompactionID()), zap.Any("resp", resp), zap.Error(err))
log.Info("received GetCompactionState response", zap.Int64("compactionID", req.GetCompactionID()), zap.Any("resp", resp), zap.Error(err))
return resp, err
}

@ -3916,7 +3916,7 @@ func (node *Proxy) ManualCompaction(ctx context.Context, req *milvuspb.ManualCom

// GetCompactionStateWithPlans returns the compactions states with the given plan ID
func (node *Proxy) GetCompactionStateWithPlans(ctx context.Context, req *milvuspb.GetCompactionPlansRequest) (*milvuspb.GetCompactionPlansResponse, error) {
log.Debug("received GetCompactionStateWithPlans request", zap.Int64("compactionID", req.GetCompactionID()))
log.Info("received GetCompactionStateWithPlans request", zap.Int64("compactionID", req.GetCompactionID()))
resp := &milvuspb.GetCompactionPlansResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
@ -3924,13 +3924,13 @@ func (node *Proxy) GetCompactionStateWithPlans(ctx context.Context, req *milvusp
}

resp, err := node.dataCoord.GetCompactionStateWithPlans(ctx, req)
log.Debug("received GetCompactionStateWithPlans response", zap.Int64("compactionID", req.GetCompactionID()), zap.Any("resp", resp), zap.Error(err))
log.Info("received GetCompactionStateWithPlans response", zap.Int64("compactionID", req.GetCompactionID()), zap.Any("resp", resp), zap.Error(err))
return resp, err
}

// GetFlushState gets the flush state of multiple segments
func (node *Proxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateRequest) (*milvuspb.GetFlushStateResponse, error) {
log.Debug("received get flush state request", zap.Any("request", req))
log.Info("received get flush state request", zap.Any("request", req))
var err error
resp := &milvuspb.GetFlushStateResponse{}
if !node.checkHealthy() {
@ -3944,7 +3944,7 @@ func (node *Proxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStat
log.Warn("failed to get flush state response", zap.Error(err))
return nil, err
}
log.Debug("received get flush state response", zap.Any("response", resp))
log.Info("received get flush state response", zap.Any("response", resp))
return resp, err
}

@ -4014,7 +4014,7 @@ func (node *Proxy) Import(ctx context.Context, req *milvuspb.ImportRequest) (*mi

// GetImportState checks import task state from RootCoord.
func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportStateRequest) (*milvuspb.GetImportStateResponse, error) {
log.Debug("received get import state request", zap.Int64("taskID", req.GetTask()))
log.Info("received get import state request", zap.Int64("taskID", req.GetTask()))
resp := &milvuspb.GetImportStateResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
@ -4034,7 +4034,7 @@ func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportSt
return resp, nil
}

log.Debug("successfully received get import state response", zap.Int64("taskID", req.GetTask()), zap.Any("resp", resp), zap.Error(err))
log.Info("successfully received get import state response", zap.Int64("taskID", req.GetTask()), zap.Any("resp", resp), zap.Error(err))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.SuccessLabel).Inc()
metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return resp, nil
@ -4042,7 +4042,7 @@ func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportSt

// ListImportTasks get id array of all import tasks from rootcoord
func (node *Proxy) ListImportTasks(ctx context.Context, req *milvuspb.ListImportTasksRequest) (*milvuspb.ListImportTasksResponse, error) {
log.Debug("received list import tasks request")
log.Info("received list import tasks request")
resp := &milvuspb.ListImportTasksResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
@ -4061,7 +4061,7 @@ func (node *Proxy) ListImportTasks(ctx context.Context, req *milvuspb.ListImport
return resp, nil
}

log.Debug("successfully received list import tasks response", zap.String("collection", req.CollectionName), zap.Any("tasks", resp.Tasks))
log.Info("successfully received list import tasks response", zap.String("collection", req.CollectionName), zap.Any("tasks", resp.Tasks))
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method, metrics.SuccessLabel).Inc()
metrics.ProxyReqLatency.WithLabelValues(strconv.FormatInt(Params.ProxyCfg.GetNodeID(), 10), method).Observe(float64(tr.ElapseSpan().Milliseconds()))
return resp, err
@ -4070,7 +4070,7 @@ func (node *Proxy) ListImportTasks(ctx context.Context, req *milvuspb.ListImport
// InvalidateCredentialCache invalidate the credential cache of specified username.
func (node *Proxy) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
ctx = logutil.WithModule(ctx, moduleName)
logutil.Logger(ctx).Debug("received request to invalidate credential cache",
logutil.Logger(ctx).Info("received request to invalidate credential cache",
zap.String("role", typeutil.ProxyRole),
zap.String("username", request.Username))
if !node.checkHealthy() {
@ -4081,7 +4081,7 @@ func (node *Proxy) InvalidateCredentialCache(ctx context.Context, request *proxy
if globalMetaCache != nil {
globalMetaCache.RemoveCredential(username) // no need to return error, though credential may be not cached
}
logutil.Logger(ctx).Debug("complete to invalidate credential cache",
logutil.Logger(ctx).Info("complete to invalidate credential cache",
zap.String("role", typeutil.ProxyRole),
zap.String("username", request.Username))

@ -4094,7 +4094,7 @@ func (node *Proxy) InvalidateCredentialCache(ctx context.Context, request *proxy
// UpdateCredentialCache update the credential cache of specified username.
func (node *Proxy) UpdateCredentialCache(ctx context.Context, request *proxypb.UpdateCredCacheRequest) (*commonpb.Status, error) {
ctx = logutil.WithModule(ctx, moduleName)
logutil.Logger(ctx).Debug("received request to update credential cache",
logutil.Logger(ctx).Info("received request to update credential cache",
zap.String("role", typeutil.ProxyRole),
zap.String("username", request.Username))
if !node.checkHealthy() {
@ -4108,7 +4108,7 @@ func (node *Proxy) UpdateCredentialCache(ctx context.Context, request *proxypb.U
if globalMetaCache != nil {
globalMetaCache.UpdateCredential(credInfo) // no need to return error, though credential may be not cached
}
logutil.Logger(ctx).Debug("complete to update credential cache",
logutil.Logger(ctx).Info("complete to update credential cache",
zap.String("role", typeutil.ProxyRole),
zap.String("username", request.Username))

@ -4119,7 +4119,7 @@ func (node *Proxy) UpdateCredentialCache(ctx context.Context, request *proxypb.U
}

func (node *Proxy) CreateCredential(ctx context.Context, req *milvuspb.CreateCredentialRequest) (*commonpb.Status, error) {
log.Debug("CreateCredential", zap.String("role", typeutil.ProxyRole), zap.String("username", req.Username))
log.Info("CreateCredential", zap.String("role", typeutil.ProxyRole), zap.String("username", req.Username))
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -4172,7 +4172,7 @@ func (node *Proxy) CreateCredential(ctx context.Context, req *milvuspb.CreateCre
}

func (node *Proxy) UpdateCredential(ctx context.Context, req *milvuspb.UpdateCredentialRequest) (*commonpb.Status, error) {
log.Debug("UpdateCredential", zap.String("role", typeutil.ProxyRole), zap.String("username", req.Username))
log.Info("UpdateCredential", zap.String("role", typeutil.ProxyRole), zap.String("username", req.Username))
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -4233,7 +4233,7 @@ func (node *Proxy) UpdateCredential(ctx context.Context, req *milvuspb.UpdateCre
}

func (node *Proxy) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCredentialRequest) (*commonpb.Status, error) {
log.Debug("DeleteCredential", zap.String("role", typeutil.ProxyRole), zap.String("username", req.Username))
log.Info("DeleteCredential", zap.String("role", typeutil.ProxyRole), zap.String("username", req.Username))
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
@ -4256,7 +4256,7 @@ func (node *Proxy) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCre
}

func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUsersRequest) (*milvuspb.ListCredUsersResponse, error) {
log.Debug("ListCredUsers", zap.String("role", typeutil.ProxyRole))
log.Info("ListCredUsers", zap.String("role", typeutil.ProxyRole))
if !node.checkHealthy() {
return &milvuspb.ListCredUsersResponse{Status: unhealthyStatus()}, nil
}
@ -4283,7 +4283,7 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser
}

func (node *Proxy) CreateRole(ctx context.Context, req *milvuspb.CreateRoleRequest) (*commonpb.Status, error) {
logger.Debug("CreateRole", zap.Any("req", req))
logger.Info("CreateRole", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return errorutil.UnhealthyStatus(code), nil
}
@ -4311,7 +4311,7 @@ func (node *Proxy) CreateRole(ctx context.Context, req *milvuspb.CreateRoleReque
}

func (node *Proxy) DropRole(ctx context.Context, req *milvuspb.DropRoleRequest) (*commonpb.Status, error) {
logger.Debug("DropRole", zap.Any("req", req))
logger.Info("DropRole", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return errorutil.UnhealthyStatus(code), nil
}
@ -4340,7 +4340,7 @@ func (node *Proxy) DropRole(ctx context.Context, req *milvuspb.DropRoleRequest)
}

func (node *Proxy) OperateUserRole(ctx context.Context, req *milvuspb.OperateUserRoleRequest) (*commonpb.Status, error) {
logger.Debug("OperateUserRole", zap.Any("req", req))
logger.Info("OperateUserRole", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return errorutil.UnhealthyStatus(code), nil
}
@ -4369,7 +4369,7 @@ func (node *Proxy) OperateUserRole(ctx context.Context, req *milvuspb.OperateUse
}

func (node *Proxy) SelectRole(ctx context.Context, req *milvuspb.SelectRoleRequest) (*milvuspb.SelectRoleResponse, error) {
logger.Debug("SelectRole", zap.Any("req", req))
logger.Info("SelectRole", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return &milvuspb.SelectRoleResponse{Status: errorutil.UnhealthyStatus(code)}, nil
}
@ -4399,7 +4399,7 @@ func (node *Proxy) SelectRole(ctx context.Context, req *milvuspb.SelectRoleReque
}

func (node *Proxy) SelectUser(ctx context.Context, req *milvuspb.SelectUserRequest) (*milvuspb.SelectUserResponse, error) {
logger.Debug("SelectUser", zap.Any("req", req))
logger.Info("SelectUser", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return &milvuspb.SelectUserResponse{Status: errorutil.UnhealthyStatus(code)}, nil
}
@ -4461,7 +4461,7 @@ func (node *Proxy) validPrivilegeParams(req *milvuspb.OperatePrivilegeRequest) e
}

func (node *Proxy) OperatePrivilege(ctx context.Context, req *milvuspb.OperatePrivilegeRequest) (*commonpb.Status, error) {
logger.Debug("OperatePrivilege", zap.Any("req", req))
logger.Info("OperatePrivilege", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return errorutil.UnhealthyStatus(code), nil
}
@ -4517,7 +4517,7 @@ func (node *Proxy) validGrantParams(req *milvuspb.SelectGrantRequest) error {
}

func (node *Proxy) SelectGrant(ctx context.Context, req *milvuspb.SelectGrantRequest) (*milvuspb.SelectGrantResponse, error) {
logger.Debug("SelectGrant", zap.Any("req", req))
logger.Info("SelectGrant", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return &milvuspb.SelectGrantResponse{Status: errorutil.UnhealthyStatus(code)}, nil
}
@ -4545,7 +4545,7 @@ func (node *Proxy) SelectGrant(ctx context.Context, req *milvuspb.SelectGrantReq
}

func (node *Proxy) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
logger.Debug("RefreshPrivilegeInfoCache", zap.Any("req", req))
logger.Info("RefreshPrivilegeInfoCache", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {
return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError()
}
@ -4563,7 +4563,7 @@ func (node *Proxy) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.Refr
}, err
}
}
logger.Debug("RefreshPrivilegeInfoCache success")
logger.Info("RefreshPrivilegeInfoCache success")

return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,

@ -165,7 +165,7 @@ func InitMetaCache(ctx context.Context, rootCoord types.RootCoord, queryCoord ty
return err
}
globalMetaCache.InitPolicyInfo(resp.PolicyInfos, resp.UserRoles)
log.Debug("success to init meta cache", zap.Strings("policy_infos", resp.PolicyInfos))
log.Info("success to init meta cache", zap.Strings("policy_infos", resp.PolicyInfos))
return nil
}

@ -93,7 +93,7 @@ func (rl *rateLimiter) printRates(rates []*internalpb.Rate) {
// fmt.Printf("%s -> %v\n", r.GetRt().String(), r.GetR())
//}
//fmt.Printf("---------------------------------\n")
log.Debug("RateLimiter setRates", zap.Any("rates", rates))
log.Info("RateLimiter setRates", zap.Any("rates", rates))
}

// registerLimiters register limiter for all rate types.

@ -78,7 +78,7 @@ func PrivilegeInterceptor(ctx context.Context, req interface{}) (context.Context
log.Debug("PrivilegeInterceptor", zap.String("type", reflect.TypeOf(req).String()))
privilegeExt, err := funcutil.GetPrivilegeExtObj(req)
if err != nil {
log.Debug("GetPrivilegeExtObj err", zap.Error(err))
log.Warn("GetPrivilegeExtObj err", zap.Error(err))
return ctx, nil
}
username, err := GetCurUserFromContext(ctx)
@ -166,7 +166,7 @@ func PrivilegeInterceptor(ctx context.Context, req interface{}) (context.Context
}
}

log.Debug("permission deny", zap.String("policy", policy), zap.Strings("roles", roleNames))
log.Info("permission deny", zap.String("policy", policy), zap.Strings("roles", roleNames))
return ctx, status.Error(codes.PermissionDenied, fmt.Sprintf("%s: permission deny", objectPrivilege))
}

@ -184,7 +184,7 @@ func (node *Proxy) Init() error {
log.Info("init session for Proxy done")

node.factory.Init(&Params)
log.Debug("init parameters for factory", zap.String("role", typeutil.ProxyRole), zap.Any("parameters", Params.ServiceParam))
log.Info("init parameters for factory", zap.String("role", typeutil.ProxyRole), zap.Any("parameters", Params.ServiceParam))

err := node.initRateCollector()
if err != nil {
@ -192,7 +192,7 @@ func (node *Proxy) Init() error {
}
log.Info("Proxy init rateCollector done", zap.Int64("nodeID", Params.ProxyCfg.GetNodeID()))

log.Debug("create id allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Info("create id allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
idAllocator, err := allocator.NewIDAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.GetNodeID())
if err != nil {
log.Warn("failed to create id allocator",
@ -201,9 +201,9 @@ func (node *Proxy) Init() error {
return err
}
node.rowIDAllocator = idAllocator
log.Debug("create id allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Info("create id allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))

log.Debug("create timestamp allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Info("create timestamp allocator", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
tsoAllocator, err := newTimestampAllocator(node.ctx, node.rootCoord, Params.ProxyCfg.GetNodeID())
if err != nil {
log.Warn("failed to create timestamp allocator",
@ -212,9 +212,9 @@ func (node *Proxy) Init() error {
return err
}
node.tsoAllocator = tsoAllocator
log.Debug("create timestamp allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Info("create timestamp allocator done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))

log.Debug("create segment id assigner", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Info("create segment id assigner", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
segAssigner, err := newSegIDAssigner(node.ctx, node.dataCoord, node.lastTick)
if err != nil {
log.Warn("failed to create segment id assigner",
@ -224,38 +224,38 @@ func (node *Proxy) Init() error {
}
node.segAssigner = segAssigner
node.segAssigner.PeerID = Params.ProxyCfg.GetNodeID()
log.Debug("create segment id assigner done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))
log.Info("create segment id assigner done", zap.String("role", typeutil.ProxyRole), zap.Int64("ProxyID", Params.ProxyCfg.GetNodeID()))

log.Debug("create channels manager", zap.String("role", typeutil.ProxyRole))
log.Info("create channels manager", zap.String("role", typeutil.ProxyRole))
dmlChannelsFunc := getDmlChannelsFunc(node.ctx, node.rootCoord)
chMgr := newChannelsMgrImpl(dmlChannelsFunc, defaultInsertRepackFunc, node.factory)
node.chMgr = chMgr
log.Debug("create channels manager done", zap.String("role", typeutil.ProxyRole))
log.Info("create channels manager done", zap.String("role", typeutil.ProxyRole))

log.Debug("create task scheduler", zap.String("role", typeutil.ProxyRole))
log.Info("create task scheduler", zap.String("role", typeutil.ProxyRole))
node.sched, err = newTaskScheduler(node.ctx, node.tsoAllocator, node.factory)
if err != nil {
log.Warn("failed to create task scheduler", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err
}
log.Debug("create task scheduler done", zap.String("role", typeutil.ProxyRole))
log.Info("create task scheduler done", zap.String("role", typeutil.ProxyRole))

syncTimeTickInterval := Params.ProxyCfg.TimeTickInterval / 2
log.Debug("create channels time ticker",
log.Info("create channels time ticker",
zap.String("role", typeutil.ProxyRole), zap.Duration("syncTimeTickInterval", syncTimeTickInterval))
node.chTicker = newChannelsTimeTicker(node.ctx, Params.ProxyCfg.TimeTickInterval/2, []string{}, node.sched.getPChanStatistics, tsoAllocator)
log.Debug("create channels time ticker done", zap.String("role", typeutil.ProxyRole))
log.Info("create channels time ticker done", zap.String("role", typeutil.ProxyRole))

log.Debug("create metrics cache manager", zap.String("role", typeutil.ProxyRole))
log.Info("create metrics cache manager", zap.String("role", typeutil.ProxyRole))
node.metricsCacheManager = metricsinfo.NewMetricsCacheManager()
log.Debug("create metrics cache manager done", zap.String("role", typeutil.ProxyRole))
log.Info("create metrics cache manager done", zap.String("role", typeutil.ProxyRole))

log.Debug("init meta cache", zap.String("role", typeutil.ProxyRole))
log.Info("init meta cache", zap.String("role", typeutil.ProxyRole))
if err := InitMetaCache(node.ctx, node.rootCoord, node.queryCoord, node.shardMgr); err != nil {
log.Warn("failed to init meta cache", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err
}
log.Debug("init meta cache done", zap.String("role", typeutil.ProxyRole))
log.Info("init meta cache done", zap.String("role", typeutil.ProxyRole))

return nil
}
@ -328,33 +328,33 @@ func (node *Proxy) sendChannelsTimeTickLoop() {

// Start starts a proxy node.
func (node *Proxy) Start() error {
log.Debug("start task scheduler", zap.String("role", typeutil.ProxyRole))
log.Info("start task scheduler", zap.String("role", typeutil.ProxyRole))
if err := node.sched.Start(); err != nil {
log.Warn("failed to start task scheduler", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err
}
log.Debug("start task scheduler done", zap.String("role", typeutil.ProxyRole))
log.Info("start task scheduler done", zap.String("role", typeutil.ProxyRole))

log.Debug("start id allocator", zap.String("role", typeutil.ProxyRole))
log.Info("start id allocator", zap.String("role", typeutil.ProxyRole))
if err := node.rowIDAllocator.Start(); err != nil {
log.Warn("failed to start id allocator", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err
}
log.Debug("start id allocator done", zap.String("role", typeutil.ProxyRole))
log.Info("start id allocator done", zap.String("role", typeutil.ProxyRole))

log.Debug("start segment id assigner", zap.String("role", typeutil.ProxyRole))
log.Info("start segment id assigner", zap.String("role", typeutil.ProxyRole))
if err := node.segAssigner.Start(); err != nil {
log.Warn("failed to start segment id assigner", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err
}
log.Debug("start segment id assigner done", zap.String("role", typeutil.ProxyRole))
log.Info("start segment id assigner done", zap.String("role", typeutil.ProxyRole))

log.Debug("start channels time ticker", zap.String("role", typeutil.ProxyRole))
log.Info("start channels time ticker", zap.String("role", typeutil.ProxyRole))
if err := node.chTicker.start(); err != nil {
log.Warn("failed to start channels time ticker", zap.Error(err), zap.String("role", typeutil.ProxyRole))
return err
}
log.Debug("start channels time ticker done", zap.String("role", typeutil.ProxyRole))
log.Info("start channels time ticker done", zap.String("role", typeutil.ProxyRole))

node.sendChannelsTimeTickLoop()

@ -367,7 +367,7 @@ func (node *Proxy) Start() error {
Params.ProxyCfg.CreatedTime = now
Params.ProxyCfg.UpdatedTime = now

log.Debug("update state code", zap.String("role", typeutil.ProxyRole), zap.String("State", commonpb.StateCode_Healthy.String()))
log.Info("update state code", zap.String("role", typeutil.ProxyRole), zap.String("State", commonpb.StateCode_Healthy.String()))
node.UpdateStateCode(commonpb.StateCode_Healthy)

return nil

@ -583,7 +583,7 @@ func (sct *showCollectionsTask) Execute(ctx context.Context) error {
for _, collectionName := range sct.CollectionNames {
collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
if err != nil {
log.Debug("Failed to get collection id.", zap.Any("collectionName", collectionName),
log.Warn("Failed to get collection id.", zap.Any("collectionName", collectionName),
zap.Any("requestID", sct.Base.MsgID), zap.Any("requestType", "showCollections"))
return err
}
@ -630,13 +630,13 @@ func (sct *showCollectionsTask) Execute(ctx context.Context) error {
for offset, id := range resp.CollectionIDs {
collectionName, ok := IDs2Names[id]
if !ok {
log.Debug("Failed to get collection info.", zap.Any("collectionName", collectionName),
log.Warn("Failed to get collection info.", zap.Any("collectionName", collectionName),
zap.Any("requestID", sct.Base.MsgID), zap.Any("requestType", "showCollections"))
return errors.New("failed to show collections")
}
collectionInfo, err := globalMetaCache.GetCollectionInfo(ctx, collectionName)
if err != nil {
log.Debug("Failed to get collection info.", zap.Any("collectionName", collectionName),
log.Warn("Failed to get collection info.", zap.Any("collectionName", collectionName),
zap.Any("requestID", sct.Base.MsgID), zap.Any("requestType", "showCollections"))
return err
}
@ -1060,7 +1060,7 @@ func (spt *showPartitionsTask) Execute(ctx context.Context) error {
collectionName := spt.CollectionName
collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
if err != nil {
log.Debug("Failed to get collection id.", zap.Any("collectionName", collectionName),
log.Warn("Failed to get collection id.", zap.Any("collectionName", collectionName),
zap.Any("requestID", spt.Base.MsgID), zap.Any("requestType", "showPartitions"))
return err
}
@ -1073,7 +1073,7 @@ func (spt *showPartitionsTask) Execute(ctx context.Context) error {
for _, partitionName := range spt.PartitionNames {
partitionID, err := globalMetaCache.GetPartitionID(ctx, collectionName, partitionName)
if err != nil {
log.Debug("Failed to get partition id.", zap.Any("partitionName", partitionName),
log.Warn("Failed to get partition id.", zap.Any("partitionName", partitionName),
zap.Any("requestID", spt.Base.MsgID), zap.Any("requestType", "showPartitions"))
return err
}
@ -1113,13 +1113,13 @@ func (spt *showPartitionsTask) Execute(ctx context.Context) error {
for offset, id := range resp.PartitionIDs {
partitionName, ok := IDs2Names[id]
if !ok {
log.Debug("Failed to get partition id.", zap.Any("partitionName", partitionName),
log.Warn("Failed to get partition id.", zap.Any("partitionName", partitionName),
zap.Any("requestID", spt.Base.MsgID), zap.Any("requestType", "showPartitions"))
return errors.New("failed to show partitions")
}
partitionInfo, err := globalMetaCache.GetPartitionInfo(ctx, collectionName, partitionName)
if err != nil {
log.Debug("Failed to get partition id.", zap.Any("partitionName", partitionName),
log.Warn("Failed to get partition id.", zap.Any("partitionName", partitionName),
zap.Any("requestID", spt.Base.MsgID), zap.Any("requestType", "showPartitions"))
return err
}
@ -1285,7 +1285,7 @@ func (lct *loadCollectionTask) OnEnqueue() error {
}

func (lct *loadCollectionTask) PreExecute(ctx context.Context) error {
log.Debug("loadCollectionTask PreExecute", zap.String("role", typeutil.ProxyRole), zap.Int64("msgID", lct.Base.MsgID))
log.Info("loadCollectionTask PreExecute", zap.String("role", typeutil.ProxyRole), zap.Int64("msgID", lct.Base.MsgID))
lct.Base.MsgType = commonpb.MsgType_LoadCollection
lct.Base.SourceID = Params.ProxyCfg.GetNodeID()

@ -1304,7 +1304,7 @@ func (lct *loadCollectionTask) PreExecute(ctx context.Context) error {
}

func (lct *loadCollectionTask) Execute(ctx context.Context) (err error) {
log.Debug("loadCollectionTask Execute", zap.String("role", typeutil.ProxyRole), zap.Int64("msgID", lct.Base.MsgID))
log.Info("loadCollectionTask Execute", zap.String("role", typeutil.ProxyRole), zap.Int64("msgID", lct.Base.MsgID))
collID, err := globalMetaCache.GetCollectionID(ctx, lct.CollectionName)
if err != nil {
return err
@ -1353,7 +1353,7 @@ func (lct *loadCollectionTask) Execute(ctx context.Context) (err error) {
ReplicaNumber: lct.ReplicaNumber,
FieldIndexID: fieldIndexIDs,
}
log.Debug("send LoadCollectionRequest to query coordinator", zap.String("role", typeutil.ProxyRole),
log.Info("send LoadCollectionRequest to query coordinator", zap.String("role", typeutil.ProxyRole),
zap.Int64("msgID", request.Base.MsgID), zap.Int64("collectionID", request.CollectionID),
zap.Any("schema", request.Schema))
lct.result, err = lct.queryCoord.LoadCollection(ctx, request)
@ -1364,7 +1364,7 @@ func (lct *loadCollectionTask) Execute(ctx context.Context) (err error) {
}

func (lct *loadCollectionTask) PostExecute(ctx context.Context) error {
log.Debug("loadCollectionTask PostExecute", zap.String("role", typeutil.ProxyRole),
log.Info("loadCollectionTask PostExecute", zap.String("role", typeutil.ProxyRole),
zap.Int64("msgID", lct.Base.MsgID))
return nil
}

@ -135,7 +135,7 @@ func (cit *createIndexTask) parseIndexParams() error {
return fmt.Errorf("IndexType should be %s", AutoIndexName)
}
}
log.Debug("create index trigger AutoIndex",
log.Info("create index trigger AutoIndex",
zap.String("type", Params.AutoIndexConfig.AutoIndexTypeName))
// override params
for k, v := range Params.AutoIndexConfig.IndexParams {
@ -301,7 +301,7 @@ func (cit *createIndexTask) PreExecute(ctx context.Context) error {
}

func (cit *createIndexTask) Execute(ctx context.Context) error {
log.Debug("proxy create index", zap.Int64("collID", cit.collectionID), zap.Int64("fieldID", cit.fieldSchema.GetFieldID()),
log.Info("proxy create index", zap.Int64("collID", cit.collectionID), zap.Int64("fieldID", cit.fieldSchema.GetFieldID()),
zap.String("indexName", cit.req.GetIndexName()), zap.Any("typeParams", cit.fieldSchema.GetTypeParams()),
zap.Any("indexParams", cit.req.GetExtraParams()))

@ -352,7 +352,7 @@ func (scheduler *taskScheduler) tryPromoteAll() {
}

if len(toPromote) > 0 || len(toRemove) > 0 {
log.Debug("promoted tasks",
log.Info("promoted tasks",
zap.Int("promotedNum", len(toPromote)),
zap.Int("toRemoveNum", len(toRemove)))
}
@ -449,7 +449,7 @@ func (scheduler *taskScheduler) schedule(node int64) {

scheduler.tryPromoteAll()

log.Debug("process tasks related to node",
log.Info("process tasks related to node",
zap.Int("processingTaskNum", scheduler.processQueue.Len()),
zap.Int("waitingTaskNum", scheduler.waitQueue.Len()),
zap.Int("segmentTaskNum", len(scheduler.segmentTasks)),
@ -476,7 +476,7 @@ func (scheduler *taskScheduler) schedule(node int64) {
log.Info("processed tasks",
zap.Int("toRemoveNum", len(toRemove)))

log.Debug("process tasks related to node done",
log.Info("process tasks related to node done",
zap.Int("processingTaskNum", scheduler.processQueue.Len()),
zap.Int("waitingTaskNum", scheduler.waitQueue.Len()),
zap.Int("segmentTaskNum", len(scheduler.segmentTasks)),
@ -591,7 +591,7 @@ func (scheduler *taskScheduler) remove(task Task) {
}

metrics.QueryCoordTaskNum.WithLabelValues().Set(float64(scheduler.tasks.Len()))
log.Debug("task removed")
log.Info("task removed")
}

func (scheduler *taskScheduler) checkCanceled(task Task) bool {

@ -154,7 +154,7 @@ func FetchTargets(ctx context.Context,
segments := make([]*datapb.SegmentInfo, 0)

for _, partitionID := range partitions {
log.Debug("get recovery info...",
log.Info("get recovery info...",
zap.Int64("collectionID", collection),
zap.Int64("partitionID", partitionID))
vChannelInfos, binlogs, err := broker.GetRecoveryInfo(ctx, collection, partitionID)

@ -587,7 +587,7 @@ func (loader *segmentLoader) loadGrowingSegments(segment *Segment,
if err != nil {
return err
}
log.Debug("insertNode operator", zap.Int("insert size", numOfRecords), zap.Int64("insert offset", offset), zap.Int64("segment id", segment.ID()))
log.Info("insertNode operator", zap.Int("insert size", numOfRecords), zap.Int64("insert offset", offset), zap.Int64("segment id", segment.ID()))

// 2. update bloom filter
insertRecord, err := storage.TransferInsertDataToInsertRecord(insertData)
@ -770,7 +770,7 @@ func (loader *segmentLoader) FromDmlCPLoadDelete(ctx context.Context, collection
for hasMore {
select {
case <-ctx.Done():
log.Debug("read delta msg from seek position done", zap.Error(ctx.Err()))
log.Info("read delta msg from seek position done", zap.Error(ctx.Err()))
return ctx.Err()
case msgPack, ok := <-stream.Chan():
if !ok {

@ -833,9 +833,9 @@ func (sc *ShardCluster) GetStatistics(ctx context.Context, req *querypb.GetStati
segAllocs, versionID := sc.segmentAllocations(req.GetReq().GetPartitionIDs())
defer sc.finishUsage(versionID)

log.Debug("cluster segment distribution", zap.Int("len", len(segAllocs)))
log.Info("cluster segment distribution", zap.Int("len", len(segAllocs)))
for nodeID, segmentIDs := range segAllocs {
log.Debug("segments distribution", zap.Int64("nodeID", nodeID), zap.Int64s("segments", segmentIDs))
log.Info("segments distribution", zap.Int64("nodeID", nodeID), zap.Int64s("segments", segmentIDs))
}

// concurrent visiting nodes
@ -924,9 +924,9 @@ func (sc *ShardCluster) Search(ctx context.Context, req *querypb.SearchRequest,
segAllocs, versionID := sc.segmentAllocations(req.GetReq().GetPartitionIDs())
defer sc.finishUsage(versionID)

log.Debug("cluster segment distribution", zap.Int("len", len(segAllocs)), zap.Int64s("partitionIDs", req.GetReq().GetPartitionIDs()))
log.Info("cluster segment distribution", zap.Int("len", len(segAllocs)), zap.Int64s("partitionIDs", req.GetReq().GetPartitionIDs()))
for nodeID, segmentIDs := range segAllocs {
log.Debug("segments distribution", zap.Int64("nodeID", nodeID), zap.Int64s("segments", segmentIDs))
log.Info("segments distribution", zap.Int64("nodeID", nodeID), zap.Int64s("segments", segmentIDs))
}

// concurrent visiting nodes

@ -103,7 +103,7 @@ func (s *ShardClusterService) releaseShardCluster(vchannelName string) error {
}

func (s *ShardClusterService) close() error {
log.Debug("start to close shard cluster service")
log.Info("start to close shard cluster service")

isFinish := true
s.clusters.Range(func(key, value any) bool {

@ -172,7 +172,7 @@ func newDmlChannels(ctx context.Context, factory msgstream.Factory, chanNamePref
}

heap.Init(&d.channelsHeap)
log.Debug("init dml channels", zap.Int64("num", chanNum))
log.Info("init dml channels", zap.Int64("num", chanNum))
metrics.RootCoordNumOfDMLChannel.Add(float64(chanNum))
metrics.RootCoordNumOfMsgStream.Add(float64(chanNum))

@ -140,7 +140,7 @@ func (m *importManager) sendOutTasksLoop(wg *sync.WaitGroup) {
for {
select {
case <-m.ctx.Done():
log.Debug("import manager context done, exit check sendOutTasksLoop")
log.Info("import manager context done, exit check sendOutTasksLoop")
return
case <-ticker.C:
if err := m.sendOutTasks(m.ctx); err != nil {
@ -158,10 +158,10 @@ func (m *importManager) flipTaskStateLoop(wg *sync.WaitGroup) {
for {
select {
case <-m.ctx.Done():
log.Debug("import manager context done, exit check flipTaskStateLoop")
log.Info("import manager context done, exit check flipTaskStateLoop")
return
case <-ticker.C:
log.Debug("start trying to flip task state")
log.Info("start trying to flip task state")
if err := m.flipTaskState(m.ctx); err != nil {
log.Error("failed to flip task state", zap.Error(err))
}
@ -182,15 +182,15 @@ func (m *importManager) cleanupLoop(wg *sync.WaitGroup) {
for {
select {
case <-m.ctx.Done():
log.Debug("(in cleanupLoop) import manager context done, exit cleanupLoop")
log.Info("(in cleanupLoop) import manager context done, exit cleanupLoop")
return
case <-ticker.C:
log.Debug("(in cleanupLoop) trying to expire old tasks from memory and Etcd")
log.Info("(in cleanupLoop) trying to expire old tasks from memory and Etcd")
m.expireOldTasksFromMem()
m.expireOldTasksFromEtcd()
log.Debug("(in cleanupLoop) start removing bad import segments")
log.Info("(in cleanupLoop) start removing bad import segments")
m.removeBadImportSegments(m.ctx)
log.Debug("(in cleanupLoop) start cleaning hanging busy DataNode")
log.Info("(in cleanupLoop) start cleaning hanging busy DataNode")
m.releaseHangingBusyDataNode()
}
}
@ -205,7 +205,7 @@ func (m *importManager) sendOutTasks(ctx context.Context) error {

// Trigger Import() action to DataCoord.
for len(m.pendingTasks) > 0 {
log.Debug("try to send out pending tasks", zap.Int("task_number", len(m.pendingTasks)))
log.Info("try to send out pending tasks", zap.Int("task_number", len(m.pendingTasks)))
task := m.pendingTasks[0]
// TODO: Use ImportTaskInfo directly.
it := &datapb.ImportTask{
@ -242,7 +242,7 @@ func (m *importManager) sendOutTasks(ctx context.Context) error {

// Successfully assigned dataNode for the import task. Add task to working task list and update task store.
task.DatanodeId = resp.GetDatanodeId()
log.Debug("import task successfully assigned to dataNode",
log.Info("import task successfully assigned to dataNode",
zap.Int64("task ID", it.GetTaskId()),
zap.Int64("dataNode ID", task.GetDatanodeId()))
// Add new working dataNode to busyNodes.
@ -250,7 +250,7 @@ func (m *importManager) sendOutTasks(ctx context.Context) error {
err = func() error {
m.workingLock.Lock()
defer m.workingLock.Unlock()
log.Debug("import task added as working task", zap.Int64("task ID", it.TaskId))
log.Info("import task added as working task", zap.Int64("task ID", it.TaskId))
task.State.StateCode = commonpb.ImportState_ImportStarted
task.StartTs = time.Now().Unix()
// first update the import task into meta store and then put it into working tasks
@ -436,7 +436,7 @@ func (m *importManager) importJob(ctx context.Context, req *milvuspb.ImportReque
Tasks: make([]int64, 0),
}

log.Debug("receive import job",
log.Info("receive import job",
zap.String("collection name", req.GetCollectionName()),
zap.Int64("collection ID", cID),
zap.Int64("partition ID", pID))
@ -559,7 +559,7 @@ func (m *importManager) updateTaskInfo(ir *rootcoordpb.ImportResult) (*datapb.Im
if ir == nil {
return nil, errors.New("import result is nil")
}
log.Debug("import manager update task import result", zap.Int64("taskID", ir.GetTaskId()))
log.Info("import manager update task import result", zap.Int64("taskID", ir.GetTaskId()))

found := false
var v *datapb.ImportTaskInfo
@ -598,7 +598,7 @@ func (m *importManager) updateTaskInfo(ir *rootcoordpb.ImportResult) (*datapb.Im
}

if !found {
log.Debug("import manager update task import result failed", zap.Int64("task ID", ir.GetTaskId()))
log.Info("import manager update task import result failed", zap.Int64("task ID", ir.GetTaskId()))
return nil, errors.New("failed to update import task, ID not found: " + strconv.FormatInt(ir.TaskId, 10))
}
return toPersistImportTaskInfo, nil
@ -726,7 +726,7 @@ func (m *importManager) getTaskState(tID int64) *milvuspb.GetImportStateResponse
},
Infos: make([]*commonpb.KeyValuePair, 0),
}
log.Debug("getting import task state", zap.Int64("task ID", tID))
log.Info("getting import task state", zap.Int64("task ID", tID))
// (1) Search in pending tasks list.
found := false
m.pendingLock.Lock()
@ -768,7 +768,7 @@ func (m *importManager) getTaskState(tID int64) *milvuspb.GetImportStateResponse
if found {
return resp
}
log.Debug("get import task state failed", zap.Int64("taskID", tID))
log.Info("get import task state failed", zap.Int64("taskID", tID))
return resp
}

@ -84,7 +84,7 @@ func (p *proxyManager) WatchProxy() error {
if err != nil {
return err
}
log.Debug("succeed to init sessions on etcd", zap.Any("sessions", sessions), zap.Int64("revision", rev))
log.Info("succeed to init sessions on etcd", zap.Any("sessions", sessions), zap.Int64("revision", rev))
// all init function should be clear meta firstly.
for _, f := range p.initSessionsFunc {
f(sessions)
@ -103,7 +103,7 @@ func (p *proxyManager) WatchProxy() error {
}

func (p *proxyManager) startWatchEtcd(ctx context.Context, eventCh clientv3.WatchChan) {
log.Debug("start to watch etcd")
log.Info("start to watch etcd")
for {
select {
case <-ctx.Done():
@ -149,7 +149,7 @@ func (p *proxyManager) handlePutEvent(e *clientv3.Event) error {
if err != nil {
return err
}
log.Debug("received proxy put event with session", zap.Any("session", session))
log.Info("received proxy put event with session", zap.Any("session", session))
for _, f := range p.addSessionsFunc {
f(session)
}
@ -162,7 +162,7 @@ func (p *proxyManager) handleDeleteEvent(e *clientv3.Event) error {
if err != nil {
return err
}
log.Debug("received proxy delete event with session", zap.Any("session", session))
log.Info("received proxy delete event with session", zap.Any("session", session))
for _, f := range p.delSessionsFunc {
f(session)
}
@ -194,7 +194,7 @@ func (p *proxyManager) getSessionsOnEtcd(ctx context.Context) ([]*sessionutil.Se
for _, v := range resp.Kvs {
session, err := p.parseSession(v.Value)
if err != nil {
log.Debug("failed to unmarshal session", zap.Error(err))
log.Warn("failed to unmarshal session", zap.Error(err))
return nil, 0, err
}
sessions = append(sessions, session)
@ -226,7 +226,7 @@ func listProxyInEtcd(ctx context.Context, cli *clientv3.Client) (map[int64]*sess
var s sessionutil.Session
err := json.Unmarshal(v.Value, &s)
if err != nil {
log.Debug("unmarshal SvrSession failed", zap.Error(err))
log.Warn("unmarshal SvrSession failed", zap.Error(err))
continue
}
sess[s.ServerID] = &s

@ -297,7 +297,7 @@ func (c *Core) Register() error {
c.session.ProcessActiveStandBy(c.activateFunc)
} else {
c.UpdateStateCode(commonpb.StateCode_Healthy)
log.Debug("RootCoord start successfully ", zap.String("State Code", commonpb.StateCode_Healthy.String()))
log.Info("RootCoord start successfully ", zap.String("State Code", commonpb.StateCode_Healthy.String()))
}
log.Info("RootCoord Register Finished")
go c.session.LivenessCheck(c.ctx, func() {
@ -470,7 +470,7 @@ func (c *Core) initInternal() error {
c.metricsCacheManager = metricsinfo.NewMetricsCacheManager()

c.quotaCenter = NewQuotaCenter(c.proxyClientManager, c.queryCoord, c.dataCoord, c.tsoAllocator)
log.Debug("RootCoord init QuotaCenter done")
log.Info("RootCoord init QuotaCenter done")

if err := c.initImportManager(); err != nil {
return err
@ -499,7 +499,7 @@ func (c *Core) Init() error {
func (c *Core) initCredentials() error {
credInfo, _ := c.meta.GetCredential(util.UserRoot)
if credInfo == nil {
log.Debug("RootCoord init user root")
log.Info("RootCoord init user root")
encryptedRootPassword, _ := crypto.PasswordEncrypt(util.DefaultRootPassword)
err := c.meta.AddCredential(&internalpb.CredentialInfo{Username: util.UserRoot, EncryptedPassword: encryptedRootPassword})
return err
@ -1739,7 +1739,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (
zap.Int64("task ID", ir.GetTaskId()))
resendTaskFunc()
} else if ir.GetState() != commonpb.ImportState_ImportPersisted {
log.Debug("unexpected import task state reported, return immediately (this should not happen)",
log.Warn("unexpected import task state reported, return immediately (this should not happen)",
zap.Any("task ID", ir.GetTaskId()),
zap.Any("import state", ir.GetState()))
resendTaskFunc()
@ -1798,7 +1798,7 @@ func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.Creden
method := "CreateCredential"
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
tr := timerecord.NewTimeRecorder(method)
log.Debug("CreateCredential", zap.String("role", typeutil.RootCoordRole),
log.Info("CreateCredential", zap.String("role", typeutil.RootCoordRole),
zap.String("username", credInfo.Username))

// insert to db
@ -1816,7 +1816,7 @@ func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.Creden
zap.String("username", credInfo.Username), zap.Error(err))
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
}
log.Debug("CreateCredential success", zap.String("role", typeutil.RootCoordRole),
log.Info("CreateCredential success", zap.String("role", typeutil.RootCoordRole),
zap.String("username", credInfo.Username))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
@ -1830,7 +1830,7 @@ func (c *Core) GetCredential(ctx context.Context, in *rootcoordpb.GetCredentialR
method := "GetCredential"
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
tr := timerecord.NewTimeRecorder(method)
log.Debug("GetCredential", zap.String("role", typeutil.RootCoordRole),
log.Info("GetCredential", zap.String("role", typeutil.RootCoordRole),
zap.String("username", in.Username))

credInfo, err := c.meta.GetCredential(in.Username)
@ -1842,7 +1842,7 @@ func (c *Core) GetCredential(ctx context.Context, in *rootcoordpb.GetCredentialR
Status: failStatus(commonpb.ErrorCode_GetCredentialFailure, "GetCredential failed: "+err.Error()),
}, err
}
log.Debug("GetCredential success", zap.String("role", typeutil.RootCoordRole),
log.Info("GetCredential success", zap.String("role", typeutil.RootCoordRole),
zap.String("username", in.Username))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
@ -1859,7 +1859,7 @@ func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.Creden
method := "UpdateCredential"
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
tr := timerecord.NewTimeRecorder(method)
log.Debug("UpdateCredential", zap.String("role", typeutil.RootCoordRole),
log.Info("UpdateCredential", zap.String("role", typeutil.RootCoordRole),
zap.String("username", credInfo.Username))
// update data on storage
err := c.meta.AlterCredential(credInfo)
@ -1877,7 +1877,7 @@ func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.Creden
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return failStatus(commonpb.ErrorCode_UpdateCredentialFailure, "UpdateCredential failed: "+err.Error()), nil
}
log.Debug("UpdateCredential success", zap.String("role", typeutil.RootCoordRole),
log.Info("UpdateCredential success", zap.String("role", typeutil.RootCoordRole),
zap.String("username", credInfo.Username))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
@ -1907,7 +1907,7 @@ func (c *Core) DeleteCredential(ctx context.Context, in *milvuspb.DeleteCredenti
metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
return failStatus(commonpb.ErrorCode_DeleteCredentialFailure, "DeleteCredential failed: "+err.Error()), nil
}
log.Debug("DeleteCredential success", zap.String("role", typeutil.RootCoordRole),
log.Info("DeleteCredential success", zap.String("role", typeutil.RootCoordRole),
zap.String("username", in.Username))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
@ -1931,7 +1931,7 @@ func (c *Core) ListCredUsers(ctx context.Context, in *milvuspb.ListCredUsersRequ
Status: failStatus(commonpb.ErrorCode_ListCredUsersFailure, "ListCredUsers failed: "+err.Error()),
}, err
}
log.Debug("ListCredUsers success", zap.String("role", typeutil.RootCoordRole))
log.Info("ListCredUsers success", zap.String("role", typeutil.RootCoordRole))

metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))

@ -42,8 +42,9 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
//go:generate mockery --name=Allocator --outpkg=mocktso
|
||||
// Allocator is a Timestamp Oracle allocator.
|
||||
//
|
||||
//go:generate mockery --name=Allocator --outpkg=mocktso
|
||||
type Allocator interface {
|
||||
// Initialize is used to initialize a TSO allocator.
|
||||
// It will synchronize TSO with etcd and initialize the
|
||||
|
||||
@ -159,10 +159,10 @@ func (t *timestampOracle) ResetUserTimestamp(tso uint64) error {
|
||||
|
||||
// UpdateTimestamp is used to update the timestamp.
|
||||
// This function will do two things:
|
||||
// 1. When the logical time is going to be used up, increase the current physical time.
|
||||
// 2. When the time window is not big enough, which means the saved etcd time minus the next physical time
|
||||
// will be less than or equal to `updateTimestampGuard`, then the time window needs to be updated and
|
||||
// we also need to save the next physical time plus `TsoSaveInterval` into etcd.
|
||||
// 1. When the logical time is going to be used up, increase the current physical time.
|
||||
// 2. When the time window is not big enough, which means the saved etcd time minus the next physical time
|
||||
// will be less than or equal to `updateTimestampGuard`, then the time window needs to be updated and
|
||||
// we also need to save the next physical time plus `TsoSaveInterval` into etcd.
|
||||
//
|
||||
// Here is some constraints that this function must satisfy:
|
||||
// 1. The saved time is monotonically increasing.
|
||||
|
||||
@ -109,7 +109,7 @@ func (nodeCtx *nodeCtx) work() {
|
||||
for {
|
||||
select {
|
||||
case <-nodeCtx.closeCh:
|
||||
log.Debug("flow graph node closed", zap.String("nodeName", nodeCtx.node.Name()))
|
||||
log.Info("flow graph node closed", zap.String("nodeName", nodeCtx.node.Name()))
|
||||
return
|
||||
default:
|
||||
// inputs from inputsMessages for Operate
|
||||
|
||||
@ -130,7 +130,7 @@ func GetPulsarConfig(protocol, ip, port, url string, args ...int64) (map[string]
|
||||
var err error
|
||||
|
||||
getResp := func() error {
|
||||
log.Debug("function util", zap.String("url", protocol+"://"+ip+":"+port+url))
|
||||
log.Info("function util", zap.String("url", protocol+"://"+ip+":"+port+url))
|
||||
resp, err = http.Get(protocol + "://" + ip + ":" + port + url)
|
||||
return err
|
||||
}
|
||||
@ -146,14 +146,14 @@ func GetPulsarConfig(protocol, ip, port, url string, args ...int64) (map[string]
|
||||
|
||||
err = retry.Do(context.TODO(), getResp, retry.Attempts(attempt), retry.Sleep(interval))
|
||||
if err != nil {
|
||||
log.Debug("failed to get config", zap.String("error", err.Error()))
|
||||
log.Warn("failed to get config", zap.String("error", err.Error()))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
log.Debug("get config", zap.String("config", string(body)))
|
||||
log.Info("get config", zap.String("config", string(body)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -527,9 +527,9 @@ func (p *quotaConfig) initDiskQuota() {
|
||||
p.DiskQuota = defaultDiskQuotaInMB
|
||||
}
|
||||
if p.DiskQuota < defaultDiskQuotaInMB {
|
||||
log.Debug("init disk quota", zap.String("diskQuota(MB)", fmt.Sprintf("%v", p.DiskQuota)))
|
||||
log.Info("init disk quota", zap.String("diskQuota(MB)", fmt.Sprintf("%v", p.DiskQuota)))
|
||||
} else {
|
||||
log.Debug("init disk quota", zap.String("diskQuota(MB)", "+inf"))
|
||||
log.Info("init disk quota", zap.String("diskQuota(MB)", "+inf"))
|
||||
}
|
||||
// megabytes to bytes
|
||||
p.DiskQuota = megaBytes2Bytes(p.DiskQuota)
|
||||
|
||||

@ -63,7 +63,7 @@ func (p *ServiceParam) Init() {
	p.MetaStoreCfg.init(&p.BaseTable)
	p.EtcdCfg.init(&p.BaseTable)
	if p.MetaStoreCfg.MetaStoreType == util.MetaStoreTypeMysql {
		log.Debug("Mysql protocol is used as meta store")
		log.Info("Mysql protocol is used as meta store")
		p.DBCfg.init(&p.BaseTable)
	}
	p.PulsarCfg.init(&p.BaseTable)

@ -36,7 +36,7 @@ func Do(ctx context.Context, fn func() error, opts ...Option) error {
	for i := uint(0); i < c.attempts; i++ {
		if err := fn(); err != nil {
			if i%10 == 0 {
				log.Debug("retry func failed", zap.Uint("retry time", i), zap.Error(err))
				log.Warn("retry func failed", zap.Uint("retry time", i), zap.Error(err))
			}

			el = append(el, err)
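
// A condensed, standalone sketch of the retry loop shown above: call fn up to
// `attempts` times, log only every 10th failure to avoid flooding, collect the
// individual errors, and stop early once fn succeeds. The error aggregation is
// simplified compared to the project's retry package.
package main

import (
	"errors"
	"fmt"
	"log"
	"time"
)

func do(attempts uint, sleep time.Duration, fn func() error) error {
	var errs []error
	for i := uint(0); i < attempts; i++ {
		err := fn()
		if err == nil {
			return nil
		}
		if i%10 == 0 {
			log.Printf("retry func failed, retry time %d: %v", i, err)
		}
		errs = append(errs, err)
		time.Sleep(sleep)
	}
	return errors.Join(errs...) // requires Go 1.20+; older code would wrap manually
}

func main() {
	n := 0
	err := do(5, 10*time.Millisecond, func() error {
		n++
		if n < 3 {
			return fmt.Errorf("attempt %d failed", n)
		}
		return nil
	})
	fmt.Println("result:", err)
}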

@ -182,7 +182,7 @@ func NewSession(ctx context.Context, metaRoot string, client *clientv3.Client, o
	session.UpdateRegistered(false)

	connectEtcdFn := func() error {
		log.Debug("Session try to connect to etcd")
		log.Info("Session try to connect to etcd")
		ctx2, cancel2 := context.WithTimeout(session.ctx, 5*time.Second)
		defer cancel2()
		if _, err := client.Get(ctx2, "health"); err != nil {

@ -197,7 +197,7 @@ func NewSession(ctx context.Context, metaRoot string, client *clientv3.Client, o
			zap.Error(err))
		return nil
	}
	log.Debug("Session connect to etcd success")
	log.Info("Session connect to etcd success")
	return session
}
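
// A hedged sketch of the connectEtcdFn idea above: probe etcd with a short
// per-attempt timeout before building a session. Import paths follow etcd
// client v3.5; the endpoint, retry count and sleep are placeholders.
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func waitForEtcd(cli *clientv3.Client, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		_, err = cli.Get(ctx, "health")
		cancel()
		if err == nil {
			return nil
		}
		time.Sleep(time.Second)
	}
	return err
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("etcd reachable:", waitForEtcd(cli, 10) == nil)
}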

@ -279,7 +279,7 @@ func (s *Session) getServerIDWithKey(key string) (int64, error) {
			log.Warn("Session Txn unsuccessful", zap.String("key", key))
			continue
		}
		log.Debug("Session get serverID success", zap.String("key", key), zap.Int64("ServerId", valueInt))
		log.Info("Session get serverID success", zap.String("key", key), zap.Int64("ServerId", valueInt))
		return valueInt, nil
	}
}

@ -307,7 +307,7 @@ func (s *Session) registerService() (<-chan *clientv3.LeaseKeepAliveResponse, er
	}
	completeKey := path.Join(s.metaRoot, DefaultServiceRoot, key)
	var ch <-chan *clientv3.LeaseKeepAliveResponse
	log.Debug("service begin to register to etcd", zap.String("serverName", s.ServerName), zap.Int64("ServerID", s.ServerID))
	log.Info("service begin to register to etcd", zap.String("serverName", s.ServerName), zap.Int64("ServerID", s.ServerID))

	ttl := s.sessionTTL
	retryTimes := s.sessionRetryTimes

@ -344,7 +344,7 @@ func (s *Session) registerService() (<-chan *clientv3.LeaseKeepAliveResponse, er
		if !txnResp.Succeeded {
			return fmt.Errorf("function CompareAndSwap error for compare is false for key: %s", key)
		}
		log.Debug("put session key into etcd", zap.String("key", completeKey), zap.String("value", string(sessionJSON)))
		log.Info("put session key into etcd", zap.String("key", completeKey), zap.String("value", string(sessionJSON)))

		keepAliveCtx, keepAliveCancel := context.WithCancel(context.Background())
		s.keepAliveCancel = func() {
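
// A simplified sketch of the registration flow above: grant a lease, write the
// session key only if it does not exist yet (CompareAndSwap via a txn), and
// keep the lease alive. Key name, value, TTL and endpoint are placeholders;
// error handling is trimmed for brevity.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func register(cli *clientv3.Client, key, value string, ttl int64) (<-chan *clientv3.LeaseKeepAliveResponse, error) {
	ctx := context.Background()
	lease, err := cli.Grant(ctx, ttl)
	if err != nil {
		return nil, err
	}
	txn, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), "=", 0)). // key must not exist yet
		Then(clientv3.OpPut(key, value, clientv3.WithLease(lease.ID))).
		Commit()
	if err != nil {
		return nil, err
	}
	if !txn.Succeeded {
		return nil, fmt.Errorf("compare-and-swap failed for key: %s", key)
	}
	// The returned channel must be drained, otherwise the lease eventually expires.
	return cli.KeepAlive(context.Background(), lease.ID)
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ch, err := register(cli, "meta/session/demo", `{"ServerID":1}`, 10)
	if err != nil {
		log.Fatal(err)
	}
	for range ch { // drain keep-alive responses until the session ends
	}
}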

@ -419,7 +419,7 @@ func (s *Session) GetSessions(prefix string) (map[string]*Session, int64, error)
			return nil, 0, err
		}
		_, mapKey := path.Split(string(kv.Key))
		log.Debug("SessionUtil GetSessions ", zap.Any("prefix", prefix),
		log.Info("SessionUtil GetSessions ", zap.Any("prefix", prefix),
			zap.String("key", mapKey),
			zap.Any("address", session.Address))
		res[mapKey] = session

@ -444,11 +444,11 @@ func (s *Session) GetSessionsWithVersionRange(prefix string, r semver.Range) (ma
			return nil, 0, err
		}
		if !r(session.Version) {
			log.Debug("Session version out of range", zap.String("version", session.Version.String()), zap.Int64("serverID", session.ServerID))
			log.Info("Session version out of range", zap.String("version", session.Version.String()), zap.Int64("serverID", session.ServerID))
			continue
		}
		_, mapKey := path.Split(string(kv.Key))
		log.Debug("SessionUtil GetSessions ", zap.String("prefix", prefix),
		log.Info("SessionUtil GetSessions ", zap.String("prefix", prefix),
			zap.String("key", mapKey),
			zap.String("address", session.Address))
		res[mapKey] = session
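
// A small sketch of the version filtering in GetSessionsWithVersionRange above:
// sessions whose Version falls outside the caller-supplied semver.Range are
// skipped. It assumes github.com/blang/semver/v4 for the Range type; the
// Session struct and versions below are made up for illustration.
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

type session struct {
	ServerID int64
	Version  semver.Version
}

func filterByVersion(sessions []session, r semver.Range) []session {
	var res []session
	for _, s := range sessions {
		if !r(s.Version) {
			fmt.Printf("session %d version %s out of range, skipped\n", s.ServerID, s.Version)
			continue
		}
		res = append(res, s)
	}
	return res
}

func main() {
	sessions := []session{
		{ServerID: 1, Version: semver.MustParse("2.1.4")},
		{ServerID: 2, Version: semver.MustParse("2.2.0")},
	}
	inRange := filterByVersion(sessions, semver.MustParseRange(">=2.2.0"))
	fmt.Println("kept:", len(inRange))
}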

@ -545,7 +545,7 @@ func (w *sessionWatcher) handleWatchResponse(wresp clientv3.WatchResponse) {
		var eventType SessionEventType
		switch ev.Type {
		case mvccpb.PUT:
			log.Debug("watch services",
			log.Info("watch services",
				zap.Any("add kv", ev.Kv))
			err := json.Unmarshal([]byte(ev.Kv.Value), session)
			if err != nil {

@ -557,7 +557,7 @@ func (w *sessionWatcher) handleWatchResponse(wresp clientv3.WatchResponse) {
			}
			eventType = SessionAddEvent
		case mvccpb.DELETE:
			log.Debug("watch services",
			log.Info("watch services",
				zap.Any("delete kv", ev.PrevKv))
			err := json.Unmarshal([]byte(ev.PrevKv.Value), session)
			if err != nil {

@ -569,7 +569,7 @@ func (w *sessionWatcher) handleWatchResponse(wresp clientv3.WatchResponse) {
			}
			eventType = SessionDelEvent
		}
		log.Debug("WatchService", zap.Any("event type", eventType))
		log.Info("WatchService", zap.Any("event type", eventType))
		w.eventCh <- &SessionEvent{
			EventType: eventType,
			Session:   session,
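
// A stripped-down sketch of the watch handling above: watch a key prefix with
// previous values enabled, and map PUT/DELETE events to session add/delete
// notifications. The session type is reduced to the fields needed here and the
// prefix is a placeholder.
package main

import (
	"context"
	"encoding/json"
	"log"

	"go.etcd.io/etcd/api/v3/mvccpb"
	clientv3 "go.etcd.io/etcd/client/v3"
)

type sessionInfo struct {
	ServerID int64  `json:"ServerID"`
	Address  string `json:"Address"`
}

func watchSessions(ctx context.Context, cli *clientv3.Client, prefix string) {
	wch := cli.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithPrevKV())
	for wresp := range wch {
		for _, ev := range wresp.Events {
			var s sessionInfo
			switch ev.Type {
			case mvccpb.PUT:
				if err := json.Unmarshal(ev.Kv.Value, &s); err != nil {
					log.Println("bad session value:", err)
					continue
				}
				log.Println("session added:", s.ServerID, s.Address)
			case mvccpb.DELETE:
				if ev.PrevKv == nil { // PrevKv is only populated with the WithPrevKV option
					continue
				}
				if err := json.Unmarshal(ev.PrevKv.Value, &s); err != nil {
					log.Println("bad session value:", err)
					continue
				}
				log.Println("session removed:", s.ServerID, s.Address)
			}
		}
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	watchSessions(context.Background(), cli, "meta/session/")
}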

@ -627,7 +627,7 @@ func (s *Session) LivenessCheck(ctx context.Context, callback func()) {
			}
			return
		case <-ctx.Done():
			log.Debug("liveness exits due to context done")
			log.Info("liveness exits due to context done")
			// cancel the etcd keepAlive context
			if s.keepAliveCancel != nil {
				s.keepAliveCancel()

@ -721,7 +721,7 @@ func (s *Session) ProcessActiveStandBy(activateFunc func()) error {
	log.Info(fmt.Sprintf("serverName: %v enter STANDBY mode", s.ServerName))
	go func() {
		for s.isStandby.Load().(bool) {
			log.Debug(fmt.Sprintf("serverName: %v is in STANDBY ...", s.ServerName))
			log.Info(fmt.Sprintf("serverName: %v is in STANDBY ...", s.ServerName))
			time.Sleep(10 * time.Second)
		}
	}()

@ -751,9 +751,9 @@ func (s *Session) ProcessActiveStandBy(activateFunc func()) error {
		for _, event := range wresp.Events {
			switch event.Type {
			case mvccpb.PUT:
				log.Debug("watch the ACTIVE key", zap.Any("ADD", event.Kv))
				log.Info("watch the ACTIVE key", zap.Any("ADD", event.Kv))
			case mvccpb.DELETE:
				log.Debug("watch the ACTIVE key", zap.Any("DELETE", event.Kv))
				log.Info("watch the ACTIVE key", zap.Any("DELETE", event.Kv))
				cancel()
			}
		}
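
// A rough sketch of the standby flow above: periodically report standby state
// while watching the ACTIVE key; when that key is deleted, the watcher stops
// and the standby can try to take over. Key name and timings are placeholders,
// and atomic.Bool requires Go 1.19+.
package main

import (
	"context"
	"log"
	"sync/atomic"
	"time"

	"go.etcd.io/etcd/api/v3/mvccpb"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func waitUntilActiveGone(cli *clientv3.Client, activeKey string) {
	var standby atomic.Bool
	standby.Store(true)
	go func() {
		for standby.Load() {
			log.Println("still in STANDBY ...")
			time.Sleep(10 * time.Second)
		}
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for wresp := range cli.Watch(ctx, activeKey) {
		for _, event := range wresp.Events {
			switch event.Type {
			case mvccpb.PUT:
				log.Println("ACTIVE key updated:", string(event.Kv.Key))
			case mvccpb.DELETE:
				log.Println("ACTIVE key deleted, leaving standby")
				standby.Store(false)
				cancel() // stop watching; the watch channel closes and the loop ends
			}
		}
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	waitUntilActiveGone(cli, "meta/session/rootcoord-active")
}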