enhance: [2.4] add metrics for counting number of non-zeros/tokens of sparse search (#38328)

Sparse vectors may have an arbitrary number of non-zeros, and it is hard to
optimize without knowing the actual distribution of nnz. This PR adds a
metric for analyzing that distribution.

pr: #38329 

Also fixed a bug in sparse vector search when searching by primary key (pk).

Signed-off-by: Buqian Zheng <zhengbuqian@gmail.com>
This commit is contained in:
Buqian Zheng 2024-12-11 10:00:43 +08:00 committed by GitHub
parent 6b310e16dc
commit 25249fd26e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 86 additions and 6 deletions

View File

@ -463,6 +463,7 @@ func (t *searchTask) initSearchRequest(ctx context.Context) error {
return err
}
metrics.ProxySearchSparseNumNonZeros.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), t.collectionName).Observe(float64(typeutil.EstimateSparseVectorNNZFromPlaceholderGroup(t.request.PlaceholderGroup, int(t.request.GetNq()))))
t.SearchRequest.PlaceholderGroup = t.request.PlaceholderGroup
t.SearchRequest.Topk = queryInfo.GetTopk()
t.SearchRequest.MetricType = queryInfo.GetMetricType()

View File

@ -408,6 +408,15 @@ var (
Name: "retry_search_result_insufficient_cnt",
Help: "counter of retry search which does not have enough results",
}, []string{nodeIDLabelName, queryTypeLabelName, collectionName})
ProxySearchSparseNumNonZeros = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: milvusNamespace,
Subsystem: typeutil.ProxyRole,
Name: "search_sparse_num_non_zeros",
Help: "the number of non-zeros in each sparse search task",
Buckets: buckets,
}, []string{nodeIDLabelName, collectionName})
)
// RegisterProxy registers Proxy metrics
@ -468,6 +477,8 @@ func RegisterProxy(registry *prometheus.Registry) {
registry.MustRegister(MaxInsertRate)
registry.MustRegister(ProxyRetrySearchCount)
registry.MustRegister(ProxyRetrySearchResultInsufficientCount)
registry.MustRegister(ProxySearchSparseNumNonZeros)
}
func CleanupProxyDBMetrics(nodeID int64, dbName string) {

View File

@ -2,7 +2,6 @@ package funcutil
import (
"encoding/binary"
"fmt"
"math"
"github.com/cockroachdb/errors"
@ -83,14 +82,10 @@ func fieldDataToPlaceholderValue(fieldData *schemapb.FieldData) (*commonpb.Place
return nil, errors.New("vector data is not schemapb.VectorField_SparseFloatVector")
}
vec := vectors.SparseFloatVector
bytes, err := proto.Marshal(vec)
if err != nil {
return nil, fmt.Errorf("failed to marshal schemapb.SparseFloatArray to bytes: %w", err)
}
placeholderValue := &commonpb.PlaceholderValue{
Tag: "$0",
Type: commonpb.PlaceholderType_SparseFloatVector,
Values: [][]byte{bytes},
Values: vec.Contents,
}
return placeholderValue, nil
default:

View File

@ -1869,3 +1869,11 @@ func SparseFloatRowDim(row []byte) int64 {
}
return int64(SparseFloatRowIndexAt(row, SparseFloatRowElementCount(row)-1)) + 1
}
// EstimateSparseVectorNNZFromPlaceholderGroup estimates the total number of
// non-zero elements of all the sparse vectors in a serialized PlaceholderGroup.
//
// placeholderGroup is the proto-serialized PlaceholderGroup bytes; nq is the
// number of query rows it contains. Each non-zero element is serialized as
// 8 bytes (a uint32 index plus a float32 value), so after subtracting the
// estimated proto framing overhead, dividing by 8 approximates the total nnz.
// This is a rough estimate, and should be used only for statistics.
func EstimateSparseVectorNNZFromPlaceholderGroup(placeholderGroup []byte, nq int) int {
	// Rough proto framing overhead: ~3 bytes per row, at least 10 bytes total.
	overheadBytes := math.Max(10, float64(nq*3))
	nnz := (len(placeholderGroup) - int(overheadBytes)) / 8
	// Clamp to zero: an empty or tiny payload must not yield a negative count,
	// which would corrupt the histogram metric this feeds.
	if nnz < 0 {
		return 0
	}
	return nnz
}

View File

@ -20,6 +20,7 @@ import (
"encoding/binary"
"fmt"
"math"
"math/rand"
"reflect"
"testing"
@ -2714,3 +2715,67 @@ func TestParseJsonSparseFloatRowBytes(t *testing.T) {
assert.Error(t, err)
})
}
// TestSparsePlaceholderGroupSize exercises EstimateSparseVectorNNZFromPlaceholderGroup:
// it builds PlaceholderGroups with various nq and averageNNZ values and checks that
// the estimated total number of non-zero elements is close to the actual number.
// The assertions are statistical error bounds (per-case error < 10%, and no more
// than 2% of cases with error > 5%), tuned to the nnz distribution generated below.
func TestSparsePlaceholderGroupSize(t *testing.T) {
nqs := []int{1, 10, 100, 1000, 10000}
averageNNZs := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048}
numCases := 0
casesWithLargeError := 0
for _, nq := range nqs {
for _, averageNNZ := range averageNNZs {
// Spread amounts to apply to the per-row nnz; powers of two up to
// averageNNZ/2. For averageNNZ == 1 this is empty and the case is skipped.
variants := make([]int, 0)
for i := 1; i <= averageNNZ/2; i *= 2 {
variants = append(variants, i)
}
for _, variant := range variants {
numCases++
contents := make([][]byte, nq)
contentsSize := 0
totalNNZ := 0
for i := range contents {
// nnz of each row is drawn from [averageNNZ + variant/2, averageNNZ + 3*variant/2 - 1],
// and clamped to at least 1.
// NOTE(review): the original comment claimed the range starts at
// averageNNZ - variant/2, but the code adds variant/2 — confirm which
// was intended before changing either; the error-ratio thresholds
// below are tuned to the actual distribution.
nnz := averageNNZ + variant/2 + rand.Intn(variant)
if nnz < 1 {
nnz = 1
}
indices := make([]uint32, nnz)
values := make([]float32, nnz)
for j := 0; j < nnz; j++ {
indices[j] = uint32(i*averageNNZ + j)
values[j] = float32(i*averageNNZ + j)
}
contents[i] = CreateSparseFloatRow(indices, values)
contentsSize += len(contents[i])
totalNNZ += nnz
}
placeholderGroup := &commonpb.PlaceholderGroup{
Placeholders: []*commonpb.PlaceholderValue{
{
Tag: "$0",
Type: commonpb.PlaceholderType_SparseFloatVector,
Values: contents,
},
},
}
bytes, _ := proto.Marshal(placeholderGroup)
estimatedNNZ := EstimateSparseVectorNNZFromPlaceholderGroup(bytes, nq)
// Error ratio in percent; positive means the estimate undershot.
errorRatio := (float64(totalNNZ-estimatedNNZ) / float64(totalNNZ)) * 100
assert.Less(t, errorRatio, 10.0)
if errorRatio > 5.0 {
casesWithLargeError++
}
// keep the logs for easy debugging.
// fmt.Printf("nq: %d, total nnz: %d, overhead bytes: %d, len of bytes: %d\n", nq, totalNNZ, len(bytes)-contentsSize, len(bytes))
// fmt.Printf("\tnq: %d, total nnz: %d, estimated nnz: %d, diff: %d, error ratio: %f%%\n", nq, totalNNZ, estimatedNNZ, totalNNZ-estimatedNNZ, errorRatio)
}
}
}
largeErrorRatio := (float64(casesWithLargeError) / float64(numCases)) * 100
// no more than 2% cases have large error ratio.
assert.Less(t, largeErrorRatio, 2.0)
}