// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package querynodev2

import (
	"context"
	"fmt"
	"math"

	"github.com/samber/lo"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	"github.com/milvus-io/milvus/internal/distributed/streaming"
	"github.com/milvus-io/milvus/internal/json"
	"github.com/milvus-io/milvus/internal/querynodev2/collector"
	"github.com/milvus-io/milvus/internal/querynodev2/delegator"
	"github.com/milvus-io/milvus/internal/querynodev2/segments"
	"github.com/milvus-io/milvus/internal/util/segcore"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/metrics"
	"github.com/milvus-io/milvus/pkg/v2/streaming/util/types"
	"github.com/milvus-io/milvus/pkg/v2/util/hardware"
	"github.com/milvus-io/milvus/pkg/v2/util/metricsinfo"
	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
	"github.com/milvus-io/milvus/pkg/v2/util/ratelimitutil"
	"github.com/milvus-io/milvus/pkg/v2/util/typeutil"
)

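// getRateMetric returns the average rate over ratelimitutil.DefaultAvgDuration
// for every rate label registered in the collector.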
func getRateMetric() ([]metricsinfo.RateMetric, error) {
	rms := make([]metricsinfo.RateMetric, 0)
	for _, label := range collector.RateMetrics() {
		rate, err := collector.Rate.Rate(label, ratelimitutil.DefaultAvgDuration)
		if err != nil {
			return nil, err
		}

		rms = append(rms, metricsinfo.RateMetric{
			Label: label,
			Rate:  rate,
		})
	}
	return rms, nil
}

// getQuotaMetrics returns QueryNodeQuotaMetrics.
func getQuotaMetrics(node *QueryNode) (*metricsinfo.QueryNodeQuotaMetrics, error) {
	rms, err := getRateMetric()
	if err != nil {
		return nil, err
	}

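	// Track the minimum tsafe (and its channel) across all shard delegators on this node.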
	minTsafeChannel := ""
	minTsafe := uint64(math.MaxUint64)
	node.delegators.Range(func(channel string, delegator delegator.ShardDelegator) bool {
		tsafe := delegator.GetTSafe()
		if tsafe < minTsafe {
			minTsafeChannel = channel
			minTsafe = tsafe
		}
		return true
	})

	collections := node.manager.Collection.ListWithName()
	nodeID := fmt.Sprint(node.GetNodeID())

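	// Aggregate the memory size and row count of growing segments per collection
	// and report them as gauges.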
	var totalGrowingSize int64
	growingSegments := node.manager.Segment.GetBy(segments.WithType(segments.SegmentTypeGrowing))
	growingGroupByCollection := lo.GroupBy(growingSegments, func(seg segments.Segment) int64 {
		return seg.Collection()
	})
	for collection, name := range collections {
		coll := node.manager.Collection.Get(collection)
		if coll == nil {
			continue
		}
		segs := growingGroupByCollection[collection]
		size := lo.SumBy(segs, func(seg segments.Segment) int64 {
			return seg.MemSize()
		})
		totalGrowingSize += size
		metrics.QueryNodeEntitiesSize.WithLabelValues(nodeID, fmt.Sprint(collection),
			segments.SegmentTypeGrowing.String()).Set(float64(size))

		numEntities := lo.SumBy(segs, func(seg segments.Segment) int64 {
			return seg.RowNum()
		})

		metrics.QueryNodeNumEntities.WithLabelValues(
			coll.GetDBName(),
			name,
			nodeID,
			fmt.Sprint(collection),
			segments.SegmentTypeGrowing.String(),
		).Set(float64(numEntities))
	}

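	// Repeat the same per-collection aggregation for sealed segments.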
	sealedSegments := node.manager.Segment.GetBy(segments.WithType(segments.SegmentTypeSealed))
	sealedGroupByCollection := lo.GroupBy(sealedSegments, func(seg segments.Segment) int64 {
		return seg.Collection()
	})
	for collection, name := range collections {
		coll := node.manager.Collection.Get(collection)
		if coll == nil {
			continue
		}
		segs := sealedGroupByCollection[collection]
		size := lo.SumBy(segs, func(seg segments.Segment) int64 {
			return seg.MemSize()
		})
		metrics.QueryNodeEntitiesSize.WithLabelValues(fmt.Sprint(node.GetNodeID()),
			fmt.Sprint(collection), segments.SegmentTypeSealed.String()).Set(float64(size))
		numEntities := lo.SumBy(segs, func(seg segments.Segment) int64 {
			return seg.RowNum()
		})

		metrics.QueryNodeNumEntities.WithLabelValues(
			coll.GetDBName(),
			name,
			nodeID,
			fmt.Sprint(collection),
			segments.SegmentTypeSealed.String(),
		).Set(float64(numEntities))
	}

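	// Sum the delete buffer entry count and memory usage per collection across all shard delegators.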
	deleteBufferNum := make(map[int64]int64)
	deleteBufferSize := make(map[int64]int64)

	node.delegators.Range(func(_ string, sd delegator.ShardDelegator) bool {
		collectionID := sd.Collection()
		entryNum, memorySize := sd.GetDeleteBufferSize()
		deleteBufferNum[collectionID] += entryNum
		deleteBufferSize[collectionID] += memorySize
		return true
	})

	return &metricsinfo.QueryNodeQuotaMetrics{
		Hms: metricsinfo.HardwareMetrics{},
		Rms: rms,
		Fgm: metricsinfo.FlowGraphMetric{
			MinFlowGraphChannel: minTsafeChannel,
			MinFlowGraphTt:      minTsafe,
			NumFlowGraph:        node.pipelineManager.Num(),
		},
		GrowingSegmentsSize: totalGrowingSize,
		LoadedBinlogSize:    node.manager.Segment.GetLoadedBinlogSize(),
		Effect: metricsinfo.NodeEffect{
			NodeID:        node.GetNodeID(),
			CollectionIDs: lo.Keys(collections),
		},
		DeleteBufferInfo: metricsinfo.DeleteBufferInfo{
			CollectionDeleteBufferNum:  deleteBufferNum,
			CollectionDeleteBufferSize: deleteBufferSize,
		},
		StreamingQuota: getStreamingQuotaMetrics(),
	}, nil
}

// getStreamingQuotaMetrics returns the streaming quota metrics of the QueryNode.
func getStreamingQuotaMetrics() *metricsinfo.StreamingQuotaMetrics {
	if streamingMetrics, err := streaming.WAL().Local().GetMetricsIfLocal(context.Background()); err == nil {
		walMetrics := make([]metricsinfo.WALMetrics, 0, len(streamingMetrics.WALMetrics))
		for channel, metric := range streamingMetrics.WALMetrics {
			if rwMetric, ok := metric.(types.RWWALMetrics); ok {
				walMetrics = append(walMetrics, metricsinfo.WALMetrics{
					Channel:          channel,
					RecoveryTimeTick: rwMetric.RecoveryTimeTick,
				})
			}
		}
		return &metricsinfo.StreamingQuotaMetrics{
			WALs: walMetrics,
		}
	}
	return nil
}

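// getCollectionMetrics returns the total loaded row count per collection on this QueryNode.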
func getCollectionMetrics(node *QueryNode) (*metricsinfo.QueryNodeCollectionMetrics, error) {
	allSegments := node.manager.Segment.GetBy()
	ret := &metricsinfo.QueryNodeCollectionMetrics{
		CollectionRows: make(map[int64]int64),
	}
	for _, segment := range allSegments {
		collectionID := segment.Collection()
		ret.CollectionRows[collectionID] += segment.RowNum()
	}
	return ret, nil
}

// getChannelJSON returns the JSON string of channels
func getChannelJSON(node *QueryNode, collectionID int64) string {
	stats := node.pipelineManager.GetChannelStats(collectionID)
	ret, err := json.Marshal(stats)
	if err != nil {
		log.Warn("failed to marshal channels", zap.Error(err))
		return ""
	}
	return string(ret)
}

// getSegmentJSON returns the JSON string of segments
func getSegmentJSON(node *QueryNode, collectionID int64) string {
	allSegments := node.manager.Segment.GetBy()
	var ms []*metricsinfo.Segment
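	// A non-positive collectionID disables filtering; otherwise only segments of that collection are included.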
	for _, s := range allSegments {
		if collectionID > 0 && s.Collection() != collectionID {
			continue
		}

		indexes := make([]*metricsinfo.IndexedField, 0, len(s.Indexes()))
		for _, index := range s.Indexes() {
			indexes = append(indexes, &metricsinfo.IndexedField{
				IndexFieldID: index.IndexInfo.FieldID,
				IndexID:      index.IndexInfo.IndexID,
				IndexSize:    index.IndexInfo.IndexSize,
				BuildID:      index.IndexInfo.BuildID,
				IsLoaded:     index.IsLoaded,
				HasRawData:   s.HasRawData(index.IndexInfo.FieldID),
			})
		}

		ms = append(ms, &metricsinfo.Segment{
			SegmentID:            s.ID(),
			CollectionID:         s.Collection(),
			PartitionID:          s.Partition(),
			MemSize:              s.MemSize(),
			IndexedFields:        indexes,
			State:                s.Type().String(),
			ResourceGroup:        s.ResourceGroup(),
			LoadedInsertRowCount: s.InsertCount(),
			NodeID:               node.GetNodeID(),
		})
	}

	ret, err := json.Marshal(ms)
	if err != nil {
		log.Warn("failed to marshal segments", zap.Error(err))
		return ""
	}
	return string(ret)
}

// getSystemInfoMetrics returns metrics info of QueryNode
func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest, node *QueryNode) (string, error) {
	usedMem := hardware.GetUsedMemoryCount()
	totalMem := hardware.GetMemoryCount()

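	// Disk usage and IO wait collection is best-effort; failures are logged and do not fail the request.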
	usedDiskGB, totalDiskGB, err := hardware.GetDiskUsage(paramtable.Get().LocalStorageCfg.Path.GetValue())
	if err != nil {
		log.Ctx(ctx).Warn("get disk usage failed", zap.Error(err))
	}

	ioWait, err := hardware.GetIOWait()
	if err != nil {
		log.Ctx(ctx).Warn("get iowait failed", zap.Error(err))
	}

	// Get jemalloc memory statistics
	jemallocStats := segcore.GetJemallocStats()

	hardwareInfos := metricsinfo.HardwareMetrics{
		IP:               node.session.Address,
		CPUCoreCount:     hardware.GetCPUNum(),
		CPUCoreUsage:     hardware.GetCPUUsage(),
		Memory:           totalMem,
		MemoryUsage:      usedMem,
		Disk:             totalDiskGB,
		DiskUsage:        usedDiskGB,
		IOWaitPercentage: ioWait,
		// Jemalloc memory statistics (comprehensive metrics)
		JemallocAllocated:     jemallocStats.Allocated,
		JemallocActive:        jemallocStats.Active,
		JemallocMetadata:      jemallocStats.Metadata,
		JemallocResident:      jemallocStats.Resident,
		JemallocMapped:        jemallocStats.Mapped,
		JemallocRetained:      jemallocStats.Retained,
		JemallocFragmentation: jemallocStats.Fragmentation,
		JemallocOverhead:      jemallocStats.Overhead,
		JemallocSuccess:       jemallocStats.Success,
	}

	quotaMetrics, err := getQuotaMetrics(node)
	if err != nil {
		return "", err
	}

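	// Attach the node hardware metrics to the quota metrics.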
	quotaMetrics.Hms = hardwareInfos

	collectionMetrics, err := getCollectionMetrics(node)
	if err != nil {
		return "", err
	}

	nodeInfos := metricsinfo.QueryNodeInfos{
		BaseComponentInfos: metricsinfo.BaseComponentInfos{
			Name:          metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, node.GetNodeID()),
			HardwareInfos: hardwareInfos,
			SystemInfo:    metricsinfo.DeployMetrics{},
			CreatedTime:   paramtable.GetCreateTime().String(),
			UpdatedTime:   paramtable.GetUpdateTime().String(),
			Type:          typeutil.QueryNodeRole,
			ID:            node.session.ServerID,
		},
		SystemConfigurations: metricsinfo.QueryNodeConfiguration{
			SimdType: paramtable.Get().CommonCfg.SimdType.GetValue(),
		},
		QuotaMetrics:      quotaMetrics,
		CollectionMetrics: collectionMetrics,
	}
	metricsinfo.FillDeployMetricsWithEnv(&nodeInfos.SystemInfo)

	return metricsinfo.MarshalComponentInfos(nodeInfos)
}