wei liu c6a1c49e02
enhance: Use Blocked Bloom Filter instead of basic bloom filter impl. (#33405)
issue: #32995
To speed up the construction and querying of Bloom filters, we chose a
blocked Bloom filter instead of a basic Bloom filter implementation.
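
A minimal sketch of what "blocked" changes for callers, assuming the blocked implementation is backed by a library such as github.com/greatroar/blobloom with keys hashed by the caller (e.g. via github.com/cespare/xxhash/v2); the actual wrapper added by this PR may differ:

package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
    "github.com/greatroar/blobloom"
)

func main() {
    // Size the filter for the benchmark parameters used below:
    // capacity 1,000,000 at a 0.001 false-positive rate.
    bf := blobloom.NewOptimized(blobloom.Config{
        Capacity: 1_000_000,
        FPRate:   0.001,
    })

    // A blocked filter takes one 64-bit hash per key and confines all of
    // that key's probes to a single cache-line-sized block, which is what
    // speeds up both construction and querying.
    pk := []byte("pk-42")
    bf.Add(xxhash.Sum64(pk))

    fmt.Println(bf.Has(xxhash.Sum64(pk)))               // true
    fmt.Println(bf.Has(xxhash.Sum64([]byte("absent")))) // false with high probability
}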

WARN: This PR stays compatible with the old bloom filter implementation, but if you roll back to an older Milvus version, bloom filter deserialization may fail.

In the single-Bloom-filter test case, with a capacity of 1,000,000 and a
false-positive rate (FPR) of 0.001, the blocked Bloom filter is 5 times
faster than the basic Bloom filter in both construction and querying, at
the cost of about 30% more memory (results below, followed by a timing sketch).

- Block BF construct time    {"time": "54.128131ms"}
- Block BF size              {"size": 3021578}
- Block BF Test cost         {"time": "55.407352ms"}
- Basic BF construct time    {"time": "210.262183ms"}
- Basic BF size              {"size": 2396308}
- Basic BF Test cost         {"time": "192.596229ms"}
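
A timing harness along these lines reproduces the comparison (a sketch, not the PR's benchmark code; it assumes github.com/bits-and-blooms/bloom/v3 for the basic filter and github.com/greatroar/blobloom plus xxhash for the blocked one):

package main

import (
    "encoding/binary"
    "fmt"
    "time"

    "github.com/bits-and-blooms/bloom/v3"
    "github.com/cespare/xxhash/v2"
    "github.com/greatroar/blobloom"
)

const (
    capacity = 1_000_000
    fpRate   = 0.001
)

// key encodes i as an 8-byte little-endian primary key.
func key(i int) []byte {
    b := make([]byte, 8)
    binary.LittleEndian.PutUint64(b, uint64(i))
    return b
}

func main() {
    // Basic bloom filter: construct, then query every inserted key.
    basic := bloom.NewWithEstimates(capacity, fpRate)
    start := time.Now()
    for i := 0; i < capacity; i++ {
        basic.Add(key(i))
    }
    fmt.Println("basic construct:", time.Since(start))

    start = time.Now()
    hits := 0
    for i := 0; i < capacity; i++ {
        if basic.Test(key(i)) {
            hits++
        }
    }
    fmt.Println("basic query:", time.Since(start), "hits:", hits)

    // Blocked bloom filter: same workload, caller-supplied 64-bit hash.
    blocked := blobloom.NewOptimized(blobloom.Config{Capacity: capacity, FPRate: fpRate})
    start = time.Now()
    for i := 0; i < capacity; i++ {
        blocked.Add(xxhash.Sum64(key(i)))
    }
    fmt.Println("blocked construct:", time.Since(start))

    start = time.Now()
    hits = 0
    for i := 0; i < capacity; i++ {
        if blocked.Has(xxhash.Sum64(key(i))) {
            hits++
        }
    }
    fmt.Println("blocked query:", time.Since(start), "hits:", hits)
}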

In the multi-Bloom-filter test case, with a capacity of 100,000, an FPR of
0.001, and 100 Bloom filters, we reuse the primary key's hash locations
across all Bloom filters to avoid repeated hash computation. As a result,
the blocked Bloom filter is also 5 times faster than the basic Bloom filter
in querying (results below, followed by a sketch of the location reuse).

- Block BF TestLocation cost    {"time": "529.97183ms"}
- Basic BF TestLocation cost    {"time": "3.197430181s"}
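
The location-reuse trick can be illustrated with the basic filter library's Locations/TestLocations API (a sketch, assuming github.com/bits-and-blooms/bloom/v3; the wrapper in this PR may expose a different interface): hash the primary key once, then test the resulting locations against every filter.

package main

import (
    "fmt"

    "github.com/bits-and-blooms/bloom/v3"
)

func main() {
    // 100 filters, each sized for 100,000 keys at a 0.001 FPR,
    // matching the multi-filter benchmark above.
    filters := make([]*bloom.BloomFilter, 100)
    for i := range filters {
        filters[i] = bloom.NewWithEstimates(100_000, 0.001)
    }
    filters[42].Add([]byte("pk-1"))

    // Hash the primary key once; all filters share the same number of
    // hash functions, so the k locations can be reused for every filter.
    locations := bloom.Locations([]byte("pk-1"), filters[0].K())

    hits := 0
    for _, f := range filters {
        if f.TestLocations(locations) {
            hits++
        }
    }
    fmt.Println("filters that may contain pk-1:", hits)
}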

---------

Signed-off-by: Wei Liu <wei.liu@zilliz.com>
2024-05-31 17:49:45 +08:00


package segments

import (
    "context"
    "fmt"
    "testing"

    "github.com/stretchr/testify/suite"

    "github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    "github.com/milvus-io/milvus/internal/proto/querypb"
    storage "github.com/milvus-io/milvus/internal/storage"
    "github.com/milvus-io/milvus/internal/util/initcore"
    "github.com/milvus-io/milvus/pkg/util/paramtable"
)

type SegmentSuite struct {
    suite.Suite
    rootPath     string
    chunkManager storage.ChunkManager

    // Data
    manager      *Manager
    collectionID int64
    partitionID  int64
    segmentID    int64
    collection   *Collection
    sealed       Segment
    growing      Segment
}

func (suite *SegmentSuite) SetupSuite() {
    paramtable.Init()
}

func (suite *SegmentSuite) SetupTest() {
    var err error
    ctx := context.Background()
    msgLength := 100

    suite.rootPath = suite.T().Name()
    chunkManagerFactory := storage.NewTestChunkManagerFactory(paramtable.Get(), suite.rootPath)
    suite.chunkManager, _ = chunkManagerFactory.NewPersistentStorageChunkManager(ctx)
    initcore.InitRemoteChunkManager(paramtable.Get())

    suite.collectionID = 100
    suite.partitionID = 10
    suite.segmentID = 1
    suite.manager = NewManager()
    schema := GenTestCollectionSchema("test-reduce", schemapb.DataType_Int64, true)
    indexMeta := GenTestIndexMeta(suite.collectionID, schema)
    suite.manager.Collection.PutOrRef(suite.collectionID,
        schema,
        indexMeta,
        &querypb.LoadMetaInfo{
            LoadType:     querypb.LoadType_LoadCollection,
            CollectionID: suite.collectionID,
            PartitionIDs: []int64{suite.partitionID},
        },
    )
    suite.collection = suite.manager.Collection.Get(suite.collectionID)

    suite.sealed, err = NewSegment(ctx,
        suite.collection,
        SegmentTypeSealed,
        0,
        &querypb.SegmentLoadInfo{
            CollectionID:  suite.collectionID,
            SegmentID:     suite.segmentID,
            PartitionID:   suite.partitionID,
            InsertChannel: fmt.Sprintf("by-dev-rootcoord-dml_0_%dv0", suite.collectionID),
            Level:         datapb.SegmentLevel_Legacy,
            NumOfRows:     int64(msgLength),
            BinlogPaths: []*datapb.FieldBinlog{
                {
                    FieldID: 101,
                    Binlogs: []*datapb.Binlog{
                        {
                            LogSize:    10086,
                            MemorySize: 10086,
                        },
                    },
                },
            },
        },
    )
    suite.Require().NoError(err)

    binlogs, _, err := SaveBinLog(ctx,
        suite.collectionID,
        suite.partitionID,
        suite.segmentID,
        msgLength,
        schema,
        suite.chunkManager,
    )
    suite.Require().NoError(err)

    g, err := suite.sealed.(*LocalSegment).StartLoadData()
    suite.Require().NoError(err)
    for _, binlog := range binlogs {
        err = suite.sealed.(*LocalSegment).LoadFieldData(ctx, binlog.FieldID, int64(msgLength), binlog, false)
        suite.Require().NoError(err)
    }
    g.Done(nil)

    suite.growing, err = NewSegment(ctx,
        suite.collection,
        SegmentTypeGrowing,
        0,
        &querypb.SegmentLoadInfo{
            SegmentID:     suite.segmentID + 1,
            CollectionID:  suite.collectionID,
            PartitionID:   suite.partitionID,
            InsertChannel: fmt.Sprintf("by-dev-rootcoord-dml_0_%dv0", suite.collectionID),
            Level:         datapb.SegmentLevel_Legacy,
        },
    )
    suite.Require().NoError(err)

    insertMsg, err := genInsertMsg(suite.collection, suite.partitionID, suite.growing.ID(), msgLength)
    suite.Require().NoError(err)
    insertRecord, err := storage.TransferInsertMsgToInsertRecord(suite.collection.Schema(), insertMsg)
    suite.Require().NoError(err)
    err = suite.growing.Insert(ctx, insertMsg.RowIDs, insertMsg.Timestamps, insertRecord)
    suite.Require().NoError(err)

    suite.manager.Segment.Put(context.Background(), SegmentTypeSealed, suite.sealed)
    suite.manager.Segment.Put(context.Background(), SegmentTypeGrowing, suite.growing)
}

func (suite *SegmentSuite) TearDownTest() {
    ctx := context.Background()
    suite.sealed.Release(context.Background())
    suite.growing.Release(context.Background())
    DeleteCollection(suite.collection)
    suite.chunkManager.RemoveWithPrefix(ctx, suite.rootPath)
}

func (suite *SegmentSuite) TestLoadInfo() {
    // sealed segment has load info
    suite.NotNil(suite.sealed.LoadInfo())
    // growing segment also carries load info
    suite.NotNil(suite.growing.LoadInfo())
}

func (suite *SegmentSuite) TestResourceUsageEstimate() {
    // growing segment cannot estimate resource usage, so it reports zero
    usage := suite.growing.ResourceUsageEstimate()
    suite.Zero(usage.MemorySize)
    suite.Zero(usage.DiskSize)

    // sealed segment reports in-memory size but no disk or mmap usage
    usage = suite.sealed.ResourceUsageEstimate()
    suite.NotZero(usage.MemorySize)
    suite.Zero(usage.DiskSize)
    suite.Zero(usage.MmapFieldCount)
}

func (suite *SegmentSuite) TestDelete() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    pks, err := storage.GenInt64PrimaryKeys(0, 1)
    suite.NoError(err)

    // Test for sealed
    rowNum := suite.sealed.RowNum()
    err = suite.sealed.Delete(ctx, pks, []uint64{1000, 1000})
    suite.NoError(err)
    suite.Equal(rowNum-int64(len(pks)), suite.sealed.RowNum())
    suite.Equal(rowNum, suite.sealed.InsertCount())

    // Test for growing
    rowNum = suite.growing.RowNum()
    err = suite.growing.Delete(ctx, pks, []uint64{1000, 1000})
    suite.NoError(err)
    suite.Equal(rowNum-int64(len(pks)), suite.growing.RowNum())
    suite.Equal(rowNum, suite.growing.InsertCount())
}

func (suite *SegmentSuite) TestHasRawData() {
    has := suite.growing.HasRawData(simpleFloatVecField.id)
    suite.True(has)
    has = suite.sealed.HasRawData(simpleFloatVecField.id)
    suite.True(has)
}

func (suite *SegmentSuite) TestCASVersion() {
    segment := suite.sealed

    curVersion := segment.Version()
    suite.False(segment.CASVersion(curVersion-1, curVersion+1))
    suite.NotEqual(curVersion+1, segment.Version())

    suite.True(segment.CASVersion(curVersion, curVersion+1))
    suite.Equal(curVersion+1, segment.Version())
}

func (suite *SegmentSuite) TestSegmentRemoveUnusedFieldFiles() {
}

func (suite *SegmentSuite) TestSegmentReleased() {
    suite.sealed.Release(context.Background())

    sealed := suite.sealed.(*LocalSegment)

    suite.False(sealed.ptrLock.PinIfNotReleased())
    suite.EqualValues(0, sealed.RowNum())
    suite.EqualValues(0, sealed.MemSize())
    suite.False(sealed.HasRawData(101))
}

func TestSegment(t *testing.T) {
    suite.Run(t, new(SegmentSuite))
}