fix: Metrics with a collectionName label but no databaseName label cause name conflicts and confusion (#43277) (#43808)

issue: https://github.com/milvus-io/milvus/issues/43277

---------

Signed-off-by: PjJinchen <6268414+pj1987111@users.noreply.github.com>
PjJinchen 2025-08-15 01:37:44 +08:00 committed by GitHub
parent 412a0eb1c3
commit 64633cc5b3
8 changed files with 111 additions and 49 deletions
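
Context for the fix: Milvus supports multiple databases, so two databases can each hold a collection with the same name. Any proxy metric keyed only by collectionName then merges their series. A minimal sketch of the collision, using hypothetical metric and label names rather than the real Milvus ones:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Keyed by collection only: "orders" in db1 and db2 share one series,
	// so their counts are silently merged.
	byCollection := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "insert_total_by_collection"},
		[]string{"collection_name"},
	)
	byCollection.WithLabelValues("orders").Inc() // meant for db1
	byCollection.WithLabelValues("orders").Inc() // meant for db2
	fmt.Println(testutil.ToFloat64(byCollection.WithLabelValues("orders"))) // 2 -- conflated

	// Keyed by database + collection: the two "orders" stay distinct.
	byDB := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "insert_total_by_db_and_collection"},
		[]string{"database_name", "collection_name"},
	)
	byDB.WithLabelValues("db1", "orders").Inc()
	byDB.WithLabelValues("db2", "orders").Inc()
	fmt.Println(testutil.ToFloat64(byDB.WithLabelValues("db1", "orders"))) // 1 -- per database
}
```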

go.sum

@@ -1734,4 +1734,4 @@ sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c=
stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0=


@@ -120,6 +120,7 @@ func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *p
log.Info("received request to invalidate collection meta cache")
dbName := request.DbName
collectionName := request.CollectionName
collectionID := request.CollectionID
msgType := request.GetBase().GetMsgType()
@@ -192,9 +193,9 @@ func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *p
// no need to handle error, since this Proxy may not create dml stream for the collection.
node.chMgr.removeDMLStream(request.GetCollectionID())
// clean up collection level metrics
metrics.CleanupProxyCollectionMetrics(paramtable.GetNodeID(), collectionName)
metrics.CleanupProxyCollectionMetrics(paramtable.GetNodeID(), dbName, collectionName)
for _, alias := range aliasName {
metrics.CleanupProxyCollectionMetrics(paramtable.GetNodeID(), alias)
metrics.CleanupProxyCollectionMetrics(paramtable.GetNodeID(), dbName, alias)
}
DeregisterSubLabel(ratelimitutil.GetCollectionSubLabel(request.GetDbName(), request.GetCollectionName()))
} else if msgType == commonpb.MsgType_DropDatabase {
@@ -2591,6 +2592,7 @@ func (node *Proxy) Insert(ctx context.Context, request *milvuspb.InsertRequest)
metrics.GetStats(ctx).
SetNodeID(paramtable.GetNodeID()).
SetInboundLabel(metrics.InsertLabel).
SetDatabaseName(request.GetDbName()).
SetCollectionName(request.GetCollectionName())
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.TotalLabel, request.GetDbName(), request.GetCollectionName()).Inc()
@@ -2696,7 +2698,7 @@ func (node *Proxy) Insert(ctx context.Context, request *milvuspb.InsertRequest)
WithLabelValues(nodeID, metrics.InsertLabel, dbName, collectionName).
Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyCollectionMutationLatency.
WithLabelValues(nodeID, metrics.InsertLabel, collectionName).
WithLabelValues(nodeID, metrics.InsertLabel, dbName, collectionName).
Observe(float64(tr.ElapseSpan().Milliseconds()))
return it.result, nil
}
@@ -2719,6 +2721,7 @@ func (node *Proxy) Delete(ctx context.Context, request *milvuspb.DeleteRequest)
metrics.GetStats(ctx).
SetNodeID(paramtable.GetNodeID()).
SetInboundLabel(metrics.DeleteLabel).
SetDatabaseName(request.GetDbName()).
SetCollectionName(request.GetCollectionName())
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
@@ -2797,7 +2800,7 @@ func (node *Proxy) Delete(ctx context.Context, request *milvuspb.DeleteRequest)
metrics.ProxyMutationLatency.
WithLabelValues(nodeID, metrics.DeleteLabel, dbName, collectionName).
Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyCollectionMutationLatency.WithLabelValues(nodeID, metrics.DeleteLabel, collectionName).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyCollectionMutationLatency.WithLabelValues(nodeID, metrics.DeleteLabel, dbName, collectionName).Observe(float64(tr.ElapseSpan().Milliseconds()))
return dr.result, nil
}
@@ -2826,6 +2829,7 @@ func (node *Proxy) Upsert(ctx context.Context, request *milvuspb.UpsertRequest)
metrics.GetStats(ctx).
SetNodeID(paramtable.GetNodeID()).
SetInboundLabel(metrics.UpsertLabel).
SetDatabaseName(request.GetDbName()).
SetCollectionName(request.GetCollectionName())
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.TotalLabel, request.GetDbName(), request.GetCollectionName()).Inc()
@@ -2940,7 +2944,7 @@ func (node *Proxy) Upsert(ctx context.Context, request *milvuspb.UpsertRequest)
metrics.ProxyMutationLatency.
WithLabelValues(nodeID, metrics.UpsertLabel, dbName, collectionName).
Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyCollectionMutationLatency.WithLabelValues(nodeID, metrics.UpsertLabel, collectionName).Observe(float64(tr.ElapseSpan().Milliseconds()))
metrics.ProxyCollectionMutationLatency.WithLabelValues(nodeID, metrics.UpsertLabel, dbName, collectionName).Observe(float64(tr.ElapseSpan().Milliseconds()))
log.Debug("Finish processing upsert request in Proxy")
return it.result, nil
@@ -2978,6 +2982,7 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
metrics.ProxyRetrySearchCount.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.SearchLabel,
request.GetDbName(),
request.GetCollectionName(),
).Inc()
// result size still insufficient
@@ -2985,6 +2990,7 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
metrics.ProxyRetrySearchResultInsufficientCount.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.SearchLabel,
request.GetDbName(),
request.GetCollectionName(),
).Inc()
}
@@ -2999,6 +3005,7 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
metrics.ProxyRecallSearchCount.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.SearchLabel,
request.GetDbName(),
request.GetCollectionName(),
).Inc()
if merr.Ok(rspGT.GetStatus()) {
@@ -3023,11 +3030,13 @@ func (node *Proxy) search(ctx context.Context, request *milvuspb.SearchRequest,
metrics.GetStats(ctx).
SetNodeID(paramtable.GetNodeID()).
SetInboundLabel(metrics.SearchLabel).
SetDatabaseName(request.GetDbName()).
SetCollectionName(request.GetCollectionName())
metrics.ProxyReceivedNQ.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.SearchLabel,
request.GetDbName(),
request.GetCollectionName(),
).Add(float64(request.GetNq()))
@@ -3204,6 +3213,7 @@ func (node *Proxy) search(ctx context.Context, request *milvuspb.SearchRequest,
metrics.ProxyCollectionSQLatency.WithLabelValues(
nodeID,
metrics.SearchLabel,
dbName,
collectionName,
).Observe(float64(searchDur))
@@ -3243,6 +3253,7 @@ func (node *Proxy) HybridSearch(ctx context.Context, request *milvuspb.HybridSea
metrics.ProxyRetrySearchCount.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.HybridSearchLabel,
request.GetDbName(),
request.GetCollectionName(),
).Inc()
// result size still insufficient
@@ -3250,6 +3261,7 @@ func (node *Proxy) HybridSearch(ctx context.Context, request *milvuspb.HybridSea
metrics.ProxyRetrySearchResultInsufficientCount.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.HybridSearchLabel,
request.GetDbName(),
request.GetCollectionName(),
).Inc()
}
@@ -3284,6 +3296,7 @@ func (node *Proxy) hybridSearch(ctx context.Context, request *milvuspb.HybridSea
metrics.GetStats(ctx).
SetNodeID(paramtable.GetNodeID()).
SetInboundLabel(metrics.HybridSearchLabel).
SetDatabaseName(request.GetDbName()).
SetCollectionName(request.GetCollectionName())
if err := merr.CheckHealthy(node.GetStateCode()); err != nil {
@@ -3438,6 +3451,7 @@ func (node *Proxy) hybridSearch(ctx context.Context, request *milvuspb.HybridSea
metrics.ProxyCollectionSQLatency.WithLabelValues(
nodeID,
metrics.HybridSearchLabel,
dbName,
collectionName,
).Observe(float64(searchDur))
@@ -3699,6 +3713,7 @@ func (node *Proxy) query(ctx context.Context, qt *queryTask, sp trace.Span) (*mi
metrics.ProxyCollectionSQLatency.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.QueryLabel,
request.DbName,
request.CollectionName,
).Observe(float64(tr.ElapseSpan().Milliseconds()))
}
@@ -3729,10 +3744,12 @@ func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (*
metrics.GetStats(ctx).
SetNodeID(paramtable.GetNodeID()).
SetInboundLabel(metrics.QueryLabel).
SetDatabaseName(request.GetDbName()).
SetCollectionName(request.GetCollectionName())
metrics.ProxyReceivedNQ.WithLabelValues(
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.QueryLabel,
request.GetDbName(),
request.GetCollectionName(),
).Add(float64(1))
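
The bulk of the impl.go changes follow one mechanical rule: WithLabelValues is positional, so once databaseName is added to a vec's label list, every call site must pass the database name in the same position. A minimal sketch with a hypothetical metric name:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Hypothetical stand-in for a proxy counter vec after the label change.
	receivedNQ := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "proxy_received_nq_sketch"},
		[]string{"node_id", "query_type", "database_name", "collection_name"},
	)

	// Values must match the declared label order: node, type, database, collection.
	receivedNQ.WithLabelValues("1", "search", "db1", "orders").Add(16)

	// A stale call site that still passed three values, e.g.
	//   receivedNQ.WithLabelValues("1", "search", "orders")
	// would panic with an inconsistent-cardinality error, which is why
	// every caller changes in the same commit as the vec definition.
}
```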


@@ -443,7 +443,7 @@ func (q *QuotaCenter) collectMetrics() error {
q.collectionIDToDBID.Insert(collectionID, coll.DBID)
q.collections.Insert(FormatCollectionKey(coll.DBID, coll.Name), collectionID)
if numEntity, ok := numEntitiesLoaded[collectionID]; ok {
metrics.RootCoordNumEntities.WithLabelValues(coll.Name, metrics.LoadedLabel).Set(float64(numEntity))
metrics.RootCoordNumEntities.WithLabelValues(coll.DBName, coll.Name, metrics.LoadedLabel).Set(float64(numEntity))
}
return true
})
@@ -503,7 +503,7 @@ func (q *QuotaCenter) collectMetrics() error {
return true
}
if datacoordCollectionMetric, ok := collectionMetrics[collectionID]; ok {
metrics.RootCoordNumEntities.WithLabelValues(coll.Name, metrics.TotalLabel).Set(float64(datacoordCollectionMetric.NumEntitiesTotal))
metrics.RootCoordNumEntities.WithLabelValues(coll.DBName, coll.Name, metrics.TotalLabel).Set(float64(datacoordCollectionMetric.NumEntitiesTotal))
fields := lo.KeyBy(coll.Fields, func(v *model.Field) int64 { return v.FieldID })
for _, indexInfo := range datacoordCollectionMetric.IndexInfo {
if _, ok := fields[indexInfo.FieldID]; !ok {
@@ -511,6 +511,7 @@ func (q *QuotaCenter) collectMetrics() error {
}
field := fields[indexInfo.FieldID]
metrics.RootCoordIndexedNumEntities.WithLabelValues(
coll.DBName,
coll.Name,
indexInfo.IndexName,
strconv.FormatBool(typeutil.IsVectorType(field.DataType))).Set(float64(indexInfo.NumEntitiesIndexed))
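
For gauge-style metrics such as RootCoordNumEntities the missing database label is worse than double counting: Set overwrites the series, so two same-named collections in different databases would keep clobbering each other's entity counts. A sketch under the same hypothetical naming:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	numEntities := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "num_entities_sketch"},
		[]string{"db_name", "collection_name", "status"},
	)

	// With the db_name label, each database's "orders" keeps its own value.
	numEntities.WithLabelValues("db1", "orders", "loaded").Set(1000)
	numEntities.WithLabelValues("db2", "orders", "loaded").Set(5)

	// Without db_name these two Set calls would hit one series, and
	// whichever collection reported last would win.
	fmt.Println(testutil.ToFloat64(numEntities.WithLabelValues("db1", "orders", "loaded"))) // 1000
}
```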


@@ -32,12 +32,21 @@ type milvusStatsKey struct{}
// it should be attached to the context so that repeated request sizing can be avoided
type RPCStats struct {
fullMethodName string
databaseName string
collectionName string
inboundPayloadSize int
inboundLabel string
nodeID int64
}
func (s *RPCStats) SetDatabaseName(dbName string) *RPCStats {
if s == nil {
return s
}
s.databaseName = dbName
return s
}
func (s *RPCStats) SetCollectionName(collName string) *RPCStats {
if s == nil {
return s
@@ -143,7 +152,7 @@ func (h *grpcSizeStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats)
nodeIDValue := strconv.FormatInt(mstats.nodeID, 10)
ProxyReceiveBytes.WithLabelValues(
nodeIDValue,
mstats.inboundLabel, mstats.collectionName).Add(float64(mstats.inboundPayloadSize))
mstats.inboundLabel, mstats.databaseName, mstats.collectionName).Add(float64(mstats.inboundPayloadSize))
// set outbound payload size metrics for marked methods
if h.shouldRecordOutbound(mstats.fullMethodName) {
ProxyReadReqSendBytes.WithLabelValues(nodeIDValue).Add(float64(rs.Length))
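
The new SetDatabaseName follows the same nil-safe builder pattern as the existing setters: each returns its receiver so calls chain, and a nil *RPCStats (no stats attached to the context) turns the whole chain into a no-op. A simplified sketch of the pattern, not the actual Milvus type:

```go
package main

import "fmt"

// rpcStats is a simplified, hypothetical stand-in for RPCStats.
type rpcStats struct {
	databaseName   string
	collectionName string
}

func (s *rpcStats) SetDatabaseName(dbName string) *rpcStats {
	if s == nil {
		return s // nil receiver: silently skip
	}
	s.databaseName = dbName
	return s
}

func (s *rpcStats) SetCollectionName(collName string) *rpcStats {
	if s == nil {
		return s
	}
	s.collectionName = collName
	return s
}

func main() {
	var missing *rpcStats // e.g. no stats were attached to the context
	missing.SetDatabaseName("db1").SetCollectionName("orders") // safe no-op

	s := new(rpcStats)
	s.SetDatabaseName("db1").SetCollectionName("orders")
	fmt.Printf("%+v\n", *s) // {databaseName:db1 collectionName:orders}
}
```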


@@ -31,7 +31,7 @@ var (
Subsystem: typeutil.ProxyRole,
Name: "received_nq",
Help: "counter of nq of received search and query requests",
}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
}, []string{nodeIDLabelName, queryTypeLabelName, databaseLabelName, collectionName})
// ProxySearchVectors record the number of vectors search successfully.
ProxySearchVectors = prometheus.NewCounterVec(
@@ -87,7 +87,7 @@ var (
Name: "collection_sq_latency",
Help: "latency of search or query successfully, per collection",
Buckets: buckets,
}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
}, []string{nodeIDLabelName, queryTypeLabelName, databaseLabelName, collectionName})
// ProxyMutationLatency record the latency that mutate successfully.
ProxyMutationLatency = prometheus.NewHistogramVec(
@@ -108,7 +108,7 @@ var (
Name: "collection_mutation_latency",
Help: "latency of insert or delete successfully, per collection",
Buckets: buckets,
}, []string{nodeIDLabelName, msgTypeLabelName, collectionName})
}, []string{nodeIDLabelName, msgTypeLabelName, databaseLabelName, collectionName})
// ProxyWaitForSearchResultLatency record the time that the proxy waits for the search result.
ProxyWaitForSearchResultLatency = prometheus.NewHistogramVec(
@@ -252,7 +252,7 @@ var (
Subsystem: typeutil.ProxyRole,
Name: "receive_bytes_count",
Help: "count of bytes received from sdk",
}, []string{nodeIDLabelName, msgTypeLabelName, collectionName})
}, []string{nodeIDLabelName, msgTypeLabelName, databaseLabelName, collectionName})
// ProxyReadReqSendBytes record the bytes sent back to client by Proxy
ProxyReadReqSendBytes = prometheus.NewCounterVec(
@@ -397,7 +397,7 @@ var (
Subsystem: typeutil.ProxyRole,
Name: "retry_search_cnt",
Help: "counter of retry search",
}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
}, []string{nodeIDLabelName, queryTypeLabelName, databaseLabelName, collectionName})
// ProxyRetrySearchResultInsufficientCount records retry searches that, even without reducing topk, still do not meet the result limit;
// the likely causes are non-index-related, e.g. too few entities for a very large k, duplicate pks, etc.
@@ -407,7 +407,7 @@ var (
Subsystem: typeutil.ProxyRole,
Name: "retry_search_result_insufficient_cnt",
Help: "counter of retry search which does not have enough results",
}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
}, []string{nodeIDLabelName, queryTypeLabelName, databaseLabelName, collectionName})
// ProxyRecallSearchCount counts user-issued recall evaluation requests, which are cpu-intensive
ProxyRecallSearchCount = prometheus.NewCounterVec(
@@ -416,7 +416,7 @@ var (
Subsystem: typeutil.ProxyRole,
Name: "recall_search_cnt",
Help: "counter of recall search",
}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
}, []string{nodeIDLabelName, queryTypeLabelName, databaseLabelName, collectionName})
// ProxySearchSparseNumNonZeros records the estimated number of non-zeros in each sparse search task
ProxySearchSparseNumNonZeros = prometheus.NewHistogramVec(
@@ -557,99 +557,132 @@ func CleanupProxyDBMetrics(nodeID int64, dbName string) {
})
}
func CleanupProxyCollectionMetrics(nodeID int64, collection string) {
func CleanupProxyCollectionMetrics(nodeID int64, dbName string, collection string) {
ProxySearchVectors.DeletePartialMatch(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
databaseLabelName: dbName,
collectionName: collection,
})
ProxyInsertVectors.DeletePartialMatch(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
databaseLabelName: dbName,
collectionName: collection,
})
ProxyUpsertVectors.DeletePartialMatch(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
databaseLabelName: dbName,
collectionName: collection,
})
ProxySQLatency.DeletePartialMatch(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
databaseLabelName: dbName,
collectionName: collection,
})
ProxyMutationLatency.DeletePartialMatch(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
databaseLabelName: dbName,
collectionName: collection,
})
ProxyFunctionCall.DeletePartialMatch(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
databaseLabelName: dbName,
collectionName: collection,
})
ProxyCollectionSQLatency.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: SearchLabel, collectionName: collection,
queryTypeLabelName: SearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyCollectionSQLatency.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: QueryLabel, collectionName: collection,
queryTypeLabelName: QueryLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyCollectionMutationLatency.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: InsertLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: InsertLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyCollectionMutationLatency.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: DeleteLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: DeleteLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceivedNQ.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: SearchLabel, collectionName: collection,
queryTypeLabelName: SearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceivedNQ.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: QueryLabel, collectionName: collection,
queryTypeLabelName: QueryLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceiveBytes.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: SearchLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: SearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceiveBytes.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: QueryLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: QueryLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceiveBytes.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: InsertLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: InsertLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceiveBytes.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: DeleteLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: DeleteLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyReceiveBytes.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: UpsertLabel, collectionName: collection,
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
msgTypeLabelName: UpsertLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyRetrySearchCount.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: SearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyRetrySearchCount.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: HybridSearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyRetrySearchResultInsufficientCount.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: SearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyRetrySearchResultInsufficientCount.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: HybridSearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
ProxyRecallSearchCount.Delete(prometheus.Labels{
nodeIDLabelName: strconv.FormatInt(nodeID, 10),
queryTypeLabelName: SearchLabel,
databaseLabelName: dbName,
collectionName: collection,
})
}
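
The cleanup function mixes two deletion styles: Delete needs the complete label set and removes exactly one series, while DeletePartialMatch removes every series whose labels contain the given subset. A minimal sketch of the difference, again with a hypothetical metric:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	vec := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "cleanup_sketch"},
		[]string{"node_id", "db_name", "collection_name"},
	)
	vec.WithLabelValues("1", "db1", "orders").Inc()
	vec.WithLabelValues("1", "db1", "users").Inc()

	// Delete: full label set, removes at most one series.
	ok := vec.Delete(prometheus.Labels{
		"node_id": "1", "db_name": "db1", "collection_name": "orders",
	})

	// DeletePartialMatch: label subset, removes all matching series --
	// useful when the remaining dimensions need not be enumerated.
	n := vec.DeletePartialMatch(prometheus.Labels{
		"db_name": "db1", "collection_name": "users",
	})
	fmt.Println(ok, n) // true 1
}
```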


@@ -35,7 +35,7 @@ func RecordRestfulMetrics(ctx context.Context, outputLength int64, observeOutbou
if mstats.inboundPayloadSize > 0 {
ProxyReceiveBytes.WithLabelValues(
nodeIDValue,
mstats.inboundLabel, mstats.collectionName).Add(float64(mstats.inboundPayloadSize))
mstats.inboundLabel, mstats.databaseName, mstats.collectionName).Add(float64(mstats.inboundPayloadSize))
}
// set outbound payload size metrics
if outputLength > 0 && observeOutbound {


@@ -212,6 +212,7 @@ var (
Name: "entity_num",
Help: "number of entities, clustered by collection and their status(loaded/total)",
}, []string{
databaseLabelName,
collectionName,
statusLabelName,
})
@@ -223,6 +224,7 @@ var (
Name: "indexed_entity_num",
Help: "indexed number of entities, clustered by collection, index name and whether it's a vector index",
}, []string{
databaseLabelName,
collectionName,
indexName,
isVectorIndex,


@@ -1197,7 +1197,7 @@ Fractions >= 1 will always sample. Fractions < 0 are treated as zero.`,
t.OtlpHeaders = ParamItem{
Key: "trace.otlp.headers",
Version: "2.4.0",
Version: "2.6.0",
DefaultValue: "",
Doc: "otlp header that encoded in base64",
Export: true,