From 25249fd26e161e19a82ffd379af5e4cb9357faef Mon Sep 17 00:00:00 2001
From: Buqian Zheng
Date: Wed, 11 Dec 2024 10:00:43 +0800
Subject: [PATCH] enhance: [2.4] add metrics for counting number of
 non-zeros/tokens of sparse search (#38328)

sparse vectors may have arbitrary number of non-zeros and it is hard to
optimize without knowing the actual distribution of nnz. this PR adds a
metric for analyzing that.

pr: #38329

also fixed a bug of sparse when searching by pk

Signed-off-by: Buqian Zheng
---
 internal/proxy/task_search.go         |  1 +
 pkg/metrics/proxy_metrics.go          | 11 +++++
 pkg/util/funcutil/placeholdergroup.go |  7 +--
 pkg/util/typeutil/schema.go           |  8 ++++
 pkg/util/typeutil/schema_test.go      | 65 +++++++++++++++++++++++++++
 5 files changed, 86 insertions(+), 6 deletions(-)

diff --git a/internal/proxy/task_search.go b/internal/proxy/task_search.go
index 5ad8807e0c..65ae8a910c 100644
--- a/internal/proxy/task_search.go
+++ b/internal/proxy/task_search.go
@@ -463,6 +463,7 @@ func (t *searchTask) initSearchRequest(ctx context.Context) error {
 		return err
 	}
 
+	metrics.ProxySearchSparseNumNonZeros.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), t.collectionName).Observe(float64(typeutil.EstimateSparseVectorNNZFromPlaceholderGroup(t.request.PlaceholderGroup, int(t.request.GetNq()))))
 	t.SearchRequest.PlaceholderGroup = t.request.PlaceholderGroup
 	t.SearchRequest.Topk = queryInfo.GetTopk()
 	t.SearchRequest.MetricType = queryInfo.GetMetricType()
diff --git a/pkg/metrics/proxy_metrics.go b/pkg/metrics/proxy_metrics.go
index b9ba7d619c..7e4646e313 100644
--- a/pkg/metrics/proxy_metrics.go
+++ b/pkg/metrics/proxy_metrics.go
@@ -408,6 +408,15 @@ var (
 			Name: "retry_search_result_insufficient_cnt",
 			Help: "counter of retry search which does not have enough results",
 		}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
+
+	ProxySearchSparseNumNonZeros = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Namespace: milvusNamespace,
+			Subsystem: typeutil.ProxyRole,
+			Name:      "search_sparse_num_non_zeros",
+			Help:      "the number of non-zeros in each sparse search task",
+			Buckets:   buckets,
+		}, []string{nodeIDLabelName, collectionName})
 )
 
 // RegisterProxy registers Proxy metrics
@@ -468,6 +477,8 @@ func RegisterProxy(registry *prometheus.Registry) {
 	registry.MustRegister(MaxInsertRate)
 	registry.MustRegister(ProxyRetrySearchCount)
 	registry.MustRegister(ProxyRetrySearchResultInsufficientCount)
+
+	registry.MustRegister(ProxySearchSparseNumNonZeros)
 }
 
 func CleanupProxyDBMetrics(nodeID int64, dbName string) {
diff --git a/pkg/util/funcutil/placeholdergroup.go b/pkg/util/funcutil/placeholdergroup.go
index 2fa66bdaea..0f63419250 100644
--- a/pkg/util/funcutil/placeholdergroup.go
+++ b/pkg/util/funcutil/placeholdergroup.go
@@ -2,7 +2,6 @@ package funcutil
 
 import (
 	"encoding/binary"
-	"fmt"
 	"math"
 
 	"github.com/cockroachdb/errors"
@@ -83,14 +82,10 @@ func fieldDataToPlaceholderValue(fieldData *schemapb.FieldData) (*commonpb.Place
 			return nil, errors.New("vector data is not schemapb.VectorField_SparseFloatVector")
 		}
 		vec := vectors.SparseFloatVector
-		bytes, err := proto.Marshal(vec)
-		if err != nil {
-			return nil, fmt.Errorf("failed to marshal schemapb.SparseFloatArray to bytes: %w", err)
-		}
 		placeholderValue := &commonpb.PlaceholderValue{
 			Tag:    "$0",
 			Type:   commonpb.PlaceholderType_SparseFloatVector,
-			Values: [][]byte{bytes},
+			Values: vec.Contents,
 		}
 		return placeholderValue, nil
 	default:
diff --git a/pkg/util/typeutil/schema.go b/pkg/util/typeutil/schema.go
index 40b44ae50b..5b8780b767 100644
--- a/pkg/util/typeutil/schema.go
+++ b/pkg/util/typeutil/schema.go
@@ -1869,3 +1869,11 @@ func SparseFloatRowDim(row []byte) int64 {
 	}
 	return int64(SparseFloatRowIndexAt(row, SparseFloatRowElementCount(row)-1)) + 1
 }
+
+// placeholderGroup is a serialized PlaceholderGroup, return estimated total
+// number of non-zero elements of all the sparse vectors in the placeholderGroup
+// This is a rough estimate, and should be used only for statistics.
+func EstimateSparseVectorNNZFromPlaceholderGroup(placeholderGroup []byte, nq int) int {
+	overheadBytes := math.Max(10, float64(nq*3))
+	return (len(placeholderGroup) - int(overheadBytes)) / 8
+}
diff --git a/pkg/util/typeutil/schema_test.go b/pkg/util/typeutil/schema_test.go
index ff19f3313a..f961ca64cd 100644
--- a/pkg/util/typeutil/schema_test.go
+++ b/pkg/util/typeutil/schema_test.go
@@ -20,6 +20,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"math"
+	"math/rand"
 	"reflect"
 	"testing"
 
@@ -2714,3 +2715,67 @@ func TestParseJsonSparseFloatRowBytes(t *testing.T) {
 		assert.Error(t, err)
 	})
 }
+
+// test EstimateSparseVectorNNZFromPlaceholderGroup: given a PlaceholderGroup
+// with various nq and averageNNZ, test if the estimated number of non-zero
+// elements is close to the actual number.
+func TestSparsePlaceholderGroupSize(t *testing.T) {
+	nqs := []int{1, 10, 100, 1000, 10000}
+	averageNNZs := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048}
+	numCases := 0
+	casesWithLargeError := 0
+	for _, nq := range nqs {
+		for _, averageNNZ := range averageNNZs {
+			variants := make([]int, 0)
+			for i := 1; i <= averageNNZ/2; i *= 2 {
+				variants = append(variants, i)
+			}
+
+			for _, variant := range variants {
+				numCases++
+				contents := make([][]byte, nq)
+				contentsSize := 0
+				totalNNZ := 0
+				for i := range contents {
+					// nnz of each row is in range [averageNNZ - variant/2, averageNNZ + variant/2] and at least 1.
+					nnz := averageNNZ + variant/2 - rand.Intn(variant)
+					if nnz < 1 {
+						nnz = 1
+					}
+					indices := make([]uint32, nnz)
+					values := make([]float32, nnz)
+					for j := 0; j < nnz; j++ {
+						indices[j] = uint32(i*averageNNZ + j)
+						values[j] = float32(i*averageNNZ + j)
+					}
+					contents[i] = CreateSparseFloatRow(indices, values)
+					contentsSize += len(contents[i])
+					totalNNZ += nnz
+				}
+
+				placeholderGroup := &commonpb.PlaceholderGroup{
+					Placeholders: []*commonpb.PlaceholderValue{
+						{
+							Tag:    "$0",
+							Type:   commonpb.PlaceholderType_SparseFloatVector,
+							Values: contents,
+						},
+					},
+				}
+				bytes, _ := proto.Marshal(placeholderGroup)
+				estimatedNNZ := EstimateSparseVectorNNZFromPlaceholderGroup(bytes, nq)
+				errorRatio := (float64(totalNNZ-estimatedNNZ) / float64(totalNNZ)) * 100
+				assert.Less(t, errorRatio, 10.0)
+				if errorRatio > 5.0 {
+					casesWithLargeError++
+				}
+				// keep the logs for easy debugging.
+				// fmt.Printf("nq: %d, total nnz: %d, overhead bytes: %d, len of bytes: %d\n", nq, totalNNZ, len(bytes)-contentsSize, len(bytes))
+				// fmt.Printf("\tnq: %d, total nnz: %d, estimated nnz: %d, diff: %d, error ratio: %f%%\n", nq, totalNNZ, estimatedNNZ, totalNNZ-estimatedNNZ, errorRatio)
+			}
+		}
+	}
+	largeErrorRatio := (float64(casesWithLargeError) / float64(numCases)) * 100
+	// no more than 2% cases have large error ratio.
+	assert.Less(t, largeErrorRatio, 2.0)
+}