Add ctx parameter for ChunkManager methods (#19546)

Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>

congqixia authored 2022-09-29 16:18:56 +08:00; committed by GitHub
parent 1817627316
commit 838a633584
41 changed files with 810 additions and 698 deletions
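For orientation, the ChunkManager contract after this change looks roughly as follows. This is a sketch reconstructed from the mock implementations later in this diff (mockCm, chunkManagerMock, mockChunkmgr); it is not a verbatim copy of the interface definition in internal/storage:

// Sketch of the post-change ChunkManager interface, reconstructed from the
// mock implementations touched in this diff.
type ChunkManager interface {
    RootPath() string
    Path(ctx context.Context, filePath string) (string, error)
    Size(ctx context.Context, filePath string) (int64, error)
    Write(ctx context.Context, filePath string, content []byte) error
    MultiWrite(ctx context.Context, contents map[string][]byte) error
    Exist(ctx context.Context, filePath string) (bool, error)
    Read(ctx context.Context, filePath string) ([]byte, error)
    Reader(ctx context.Context, filePath string) (storage.FileReader, error)
    MultiRead(ctx context.Context, filePaths []string) ([][]byte, error)
    ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error)
    ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error)
    Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error)
    ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error)
    Remove(ctx context.Context, filePath string) error
    MultiRemove(ctx context.Context, filePaths []string) error
    RemoveWithPrefix(ctx context.Context, prefix string) error
    Close()
}

Every read, write, list, and remove now threads a context.Context through to the storage backend, so callers can cancel or bound in-flight object-storage I/O.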


@@ -494,7 +494,7 @@ func (c *mck) extractVchannelInfo(taskID int64, infos []*datapb.VchannelInfo) ([
 func (c *mck) extractFieldBinlog(taskID int64, fieldBinlogList []*datapb.FieldBinlog) {
     for _, fieldBinlog := range fieldBinlogList {
         for _, binlog := range fieldBinlog.Binlogs {
-            ok, _ := c.minioChunkManager.Exist(binlog.LogPath)
+            ok, _ := c.minioChunkManager.Exist(context.Background(), binlog.LogPath)
             if !ok {
                 c.taskIDToInvalidPath[taskID] = append(c.taskIDToInvalidPath[taskID], binlog.LogPath)
             }
@@ -505,7 +505,7 @@ func (c *mck) extractFieldBinlog(taskID int64, fieldBinlogList []*datapb.FieldBi
 func (c *mck) extractVecFieldIndexInfo(taskID int64, infos []*querypb.FieldIndexInfo) {
     for _, info := range infos {
         for _, indexPath := range info.IndexFilePaths {
-            ok, _ := c.minioChunkManager.Exist(indexPath)
+            ok, _ := c.minioChunkManager.Exist(context.Background(), indexPath)
             if !ok {
                 c.taskIDToInvalidPath[taskID] = append(c.taskIDToInvalidPath[taskID], indexPath)
             }


@@ -17,6 +17,7 @@
 package datacoord
 import (
+    "context"
     "path"
     "sync"
     "time"
@@ -116,6 +117,8 @@ func (gc *garbageCollector) close() {
 // scan load meta file info and compares OSS keys
 // if missing found, performs gc cleanup
 func (gc *garbageCollector) scan() {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     var total, valid, missing int
     segmentFiles := gc.meta.ListSegmentFiles()
     filesMap := make(map[string]struct{})
@@ -131,7 +134,7 @@
     var removedKeys []string
     for _, prefix := range prefixes {
-        infoKeys, modTimes, err := gc.option.cli.ListWithPrefix(prefix, true)
+        infoKeys, modTimes, err := gc.option.cli.ListWithPrefix(ctx, prefix, true)
         if err != nil {
             log.Error("gc listWithPrefix error", zap.String("error", err.Error()))
         }
@@ -161,7 +164,7 @@
         if time.Since(modTimes[i]) > gc.option.missingTolerance {
             // ignore error since it could be cleaned up next time
             removedKeys = append(removedKeys, infoKey)
-            err = gc.option.cli.Remove(infoKey)
+            err = gc.option.cli.Remove(ctx, infoKey)
             if err != nil {
                 log.Error("failed to remove object", zap.String("infoKey", infoKey), zap.Error(err))
             }
@@ -236,9 +239,11 @@ func getLogs(sinfo *SegmentInfo) []*datapb.Binlog {
 }
 func (gc *garbageCollector) removeLogs(logs []*datapb.Binlog) bool {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     delFlag := true
     for _, l := range logs {
-        err := gc.option.cli.Remove(l.GetLogPath())
+        err := gc.option.cli.Remove(ctx, l.GetLogPath())
         if err != nil {
             switch err.(type) {
             case minio.ErrorResponse:
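A recurring pattern in this diff: methods with no caller-supplied context (like the gc loops above) derive a cancellable root context once per invocation and pass it to every storage call. A minimal sketch of the pattern; doWork is a hypothetical stand-in, not a function from this commit:

func (gc *garbageCollector) doWork() {
    // No parent context is available here yet, so create a root context
    // scoped to this invocation and release its resources on return.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // All ChunkManager calls made during this invocation share ctx.
    if err := gc.option.cli.Remove(ctx, "some/key"); err != nil {
        log.Error("failed to remove object", zap.Error(err))
    }
}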


@@ -338,7 +338,7 @@ func initUtOSSEnv(bucket, root string, n int) (mcm *storage.MinioChunkManager, i
     mcm = &storage.MinioChunkManager{
         Client: cli,
     }
-    mcm.SetVar(context.TODO(), bucket, root)
+    mcm.SetVar(bucket, root)
     return mcm, inserts, stats, delta, other, nil
 }


@@ -86,7 +86,7 @@ func (b *binlogIO) download(ctx context.Context, paths []string) ([]*Blob, error
             log.Warn("downloading failed, retry in 50ms", zap.Strings("paths", paths))
             <-time.After(50 * time.Millisecond)
         }
-        vs, err = b.MultiRead(paths)
+        vs, err = b.MultiRead(ctx, paths)
     }
 }
 return nil
@@ -125,7 +125,7 @@ func (b *binlogIO) uploadSegmentFiles(
             zap.Int64("segmentID", segID))
             <-time.After(50 * time.Millisecond)
         }
-        err = b.MultiWrite(kvs)
+        err = b.MultiWrite(ctx, kvs)
     }
 }
 return nil


@@ -35,9 +35,11 @@ import (
 var binlogTestDir = "/tmp/milvus_test/test_binlog_io"
 func TestBinlogIOInterfaceMethods(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     alloc := NewAllocatorFactory()
     cm := storage.NewLocalChunkManager(storage.RootPath(binlogTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     b := &binlogIO{cm, alloc}
     t.Run("Test upload", func(t *testing.T) {
@@ -259,10 +261,13 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
 }
 func prepareBlob(cm storage.ChunkManager, key string) ([]byte, string, error) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     k := path.Join("test_prepare_blob", key)
     blob := []byte{1, 2, 3, 255, 188}
-    err := cm.Write(k, blob[:])
+    err := cm.Write(ctx, k, blob[:])
     if err != nil {
         return nil, "", err
     }
@@ -270,9 +275,11 @@ func prepareBlob(cm storage.ChunkManager, key string) ([]byte, string, error) {
 }
 func TestBinlogIOInnerMethods(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     alloc := NewAllocatorFactory()
     cm := storage.NewLocalChunkManager(storage.RootPath(binlogTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     b := &binlogIO{
         cm,
         alloc,
@@ -447,33 +454,33 @@ func (mk *mockCm) RootPath() string {
     return "mock_test"
 }
-func (mk *mockCm) Write(filePath string, content []byte) error {
+func (mk *mockCm) Write(ctx context.Context, filePath string, content []byte) error {
     return nil
 }
-func (mk *mockCm) MultiWrite(contents map[string][]byte) error {
+func (mk *mockCm) MultiWrite(ctx context.Context, contents map[string][]byte) error {
     if mk.errMultiSave {
         return errors.New("mockKv multisave error")
     }
     return nil
 }
-func (mk *mockCm) Read(filePath string) ([]byte, error) {
+func (mk *mockCm) Read(ctx context.Context, filePath string) ([]byte, error) {
     return nil, nil
 }
-func (mk *mockCm) MultiRead(filePaths []string) ([][]byte, error) {
+func (mk *mockCm) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error) {
     if mk.errMultiLoad {
         return nil, errors.New("mockKv multiload error")
     }
     return [][]byte{[]byte("a")}, nil
 }
-func (mk *mockCm) ReadWithPrefix(prefix string) ([]string, [][]byte, error) {
+func (mk *mockCm) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
     return nil, nil, nil
 }
-func (mk *mockCm) Remove(key string) error { return nil }
+func (mk *mockCm) Remove(ctx context.Context, key string) error { return nil }
-func (mk *mockCm) MultiRemove(keys []string) error { return nil }
+func (mk *mockCm) MultiRemove(ctx context.Context, keys []string) error { return nil }
-func (mk *mockCm) RemoveWithPrefix(key string) error { return nil }
+func (mk *mockCm) RemoveWithPrefix(ctx context.Context, key string) error { return nil }
 func (mk *mockCm) Close() {}


@@ -42,8 +42,10 @@ import (
 var compactTestDir = "/tmp/milvus_test/compact"
 func TestCompactionTaskInnerMethods(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     cm := storage.NewLocalChunkManager(storage.RootPath(compactTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     t.Run("Test getSegmentMeta", func(t *testing.T) {
         rc := &RootCoordFactory{
             pkType: schemapb.DataType_Int64,
@@ -534,8 +536,10 @@ func getInsertBlobs(segID UniqueID, iData *InsertData, meta *etcdpb.CollectionMe
 }
 func TestCompactorInterfaceMethods(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     cm := storage.NewLocalChunkManager(storage.RootPath(compactTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     notEmptySegmentBinlogs := []*datapb.CompactionSegmentBinlogs{{
         SegmentID:    100,
         FieldBinlogs: nil,


@@ -928,7 +928,7 @@ func (node *DataNode) SyncSegments(ctx context.Context, req *datapb.SyncSegments
         numRows: req.GetNumOfRows(),
     }
-    replica.(*SegmentReplica).initPKBloomFilter(targetSeg, req.GetStatsLogs(), tsoutil.GetCurrentTime())
+    replica.(*SegmentReplica).initPKBloomFilter(ctx, targetSeg, req.GetStatsLogs(), tsoutil.GetCurrentTime())
     if err := replica.mergeFlushedSegments(targetSeg, req.GetPlanID(), req.GetCompactedFrom()); err != nil {
         status.Reason = err.Error()
@@ -1286,6 +1286,10 @@ func composeAssignSegmentIDRequest(rowNum int, shardID int, chNames []string,
 func createBinLogs(rowNum int, schema *schemapb.CollectionSchema, ts Timestamp,
     fields map[storage.FieldID]storage.FieldData, node *DataNode, segmentID, colID, partID UniqueID) ([]*datapb.FieldBinlog, []*datapb.FieldBinlog, error) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     tsFieldData := make([]int64, rowNum)
     for i := range tsFieldData {
         tsFieldData[i] = int64(ts)
@@ -1375,7 +1379,7 @@ func createBinLogs(rowNum int, schema *schemapb.CollectionSchema, ts Timestamp,
         }
     }
-    err = node.chunkManager.MultiWrite(kvs)
+    err = node.chunkManager.MultiWrite(ctx, kvs)
     if err != nil {
         return nil, nil, err
     }


@@ -453,7 +453,7 @@ func TestDataNode(t *testing.T) {
         assert.True(t, ok)
         filePath := "import/rows_1.json"
-        err = node.chunkManager.Write(filePath, content)
+        err = node.chunkManager.Write(ctx, filePath, content)
         assert.NoError(t, err)
         req := &datapb.ImportTaskRequest{
             ImportTask: &datapb.ImportTask{
@@ -529,7 +529,7 @@ func TestDataNode(t *testing.T) {
         }`)
         filePath := "import/rows_1.json"
-        err = node.chunkManager.Write(filePath, content)
+        err = node.chunkManager.Write(ctx, filePath, content)
         assert.NoError(t, err)
         req := &datapb.ImportTaskRequest{
             ImportTask: &datapb.ImportTask{
@@ -562,7 +562,7 @@ func TestDataNode(t *testing.T) {
         }`)
         filePath := "import/rows_1.json"
-        err = node.chunkManager.Write(filePath, content)
+        err = node.chunkManager.Write(ctx, filePath, content)
         assert.NoError(t, err)
         req := &datapb.ImportTaskRequest{
             ImportTask: &datapb.ImportTask{


@@ -146,7 +146,7 @@ func TestDataSyncService_newDataSyncService(te *testing.T) {
             "add un-flushed and flushed segments"},
     }
     cm := storage.NewLocalChunkManager(storage.RootPath(dataSyncServiceTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     for _, test := range tests {
         te.Run(test.description, func(t *testing.T) {
@@ -209,7 +209,7 @@ func TestDataSyncService_Start(t *testing.T) {
     flushChan := make(chan flushMsg, 100)
     resendTTChan := make(chan resendTTMsg, 100)
     cm := storage.NewLocalChunkManager(storage.RootPath(dataSyncServiceTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     replica, err := newReplica(context.Background(), mockRootCoord, cm, collectionID)
     assert.Nil(t, err)
@@ -411,9 +411,11 @@ func TestGetSegmentInfos(t *testing.T) {
 }
 func TestClearGlobalFlushingCache(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     dataCoord := &DataCoordFactory{}
     cm := storage.NewLocalChunkManager(storage.RootPath(dataSyncServiceTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     replica, err := newReplica(context.Background(), &RootCoordFactory{pkType: schemapb.DataType_Int64}, cm, 1)
     require.NoError(t, err)


@@ -199,6 +199,7 @@ func genMockReplica(segIDs []int64, pks []primaryKey, chanName string) *mockRepl
 }
 func TestFlowGraphDeleteNode_Operate(t *testing.T) {
+    ctx := context.Background()
     t.Run("Test deleteNode Operate invalid Msg", func(te *testing.T) {
         invalidInTests := []struct {
             in []Msg
@@ -245,7 +246,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
         tss = []uint64{1, 1, 1, 1, 1}
     )
     cm := storage.NewLocalChunkManager(storage.RootPath(deleteNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     t.Run("Test get segment by varChar primary keys", func(te *testing.T) {
         replica := genMockReplica(segIDs, varCharPks, chanName)
@@ -475,14 +476,14 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
 }
 func TestFlowGraphDeleteNode_showDelBuf(t *testing.T) {
-    cm := storage.NewLocalChunkManager(storage.RootPath(deleteNodeTestDir))
-    defer cm.RemoveWithPrefix("")
-    fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, &mockReplica{}, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
     ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
     defer cancel()
+    cm := storage.NewLocalChunkManager(storage.RootPath(deleteNodeTestDir))
+    defer cm.RemoveWithPrefix(ctx, "")
+    fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, &mockReplica{}, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
     chanName := "datanode-test-FlowGraphDeletenode-showDelBuf"
     testPath := "/test/datanode/root/meta"
     assert.NoError(t, clearEtcd(testPath))
@@ -515,13 +516,13 @@ func TestFlowGraphDeleteNode_showDelBuf(t *testing.T) {
 }
 func TestFlowGraphDeleteNode_updateCompactedSegments(t *testing.T) {
-    cm := storage.NewLocalChunkManager(storage.RootPath(deleteNodeTestDir))
-    defer cm.RemoveWithPrefix("")
-    fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, &mockReplica{}, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
     ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
     defer cancel()
+    cm := storage.NewLocalChunkManager(storage.RootPath(deleteNodeTestDir))
+    defer cm.RemoveWithPrefix(ctx, "")
+    fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, &mockReplica{}, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
     chanName := "datanode-test-FlowGraphDeletenode-showDelBuf"
     testPath := "/test/datanode/root/meta"


@@ -62,7 +62,7 @@ func TestFlowGraphInsertBufferNodeCreate(t *testing.T) {
     defer cancel()
     cm := storage.NewLocalChunkManager(storage.RootPath(insertNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     insertChannelName := "datanode-01-test-flowgraphinsertbuffernode-create"
     testPath := "/test/datanode/root/meta"
@@ -159,7 +159,7 @@ func TestFlowGraphInsertBufferNode_Operate(t *testing.T) {
     insertChannelName := "datanode-01-test-flowgraphinsertbuffernode-operate"
     cm := storage.NewLocalChunkManager(storage.RootPath(insertNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     testPath := "/test/datanode/root/meta"
     err := clearEtcd(testPath)
     require.NoError(t, err)
@@ -429,7 +429,7 @@ func TestFlowGraphInsertBufferNode_AutoFlush(t *testing.T) {
     wg := sync.WaitGroup{}
     cm := storage.NewLocalChunkManager(storage.RootPath(insertNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, colRep, func(pack *segmentFlushPack) {
         fpMut.Lock()
         flushPacks = append(flushPacks, pack)
@@ -685,7 +685,7 @@ func TestFlowGraphInsertBufferNode_DropPartition(t *testing.T) {
     wg := sync.WaitGroup{}
     cm := storage.NewLocalChunkManager(storage.RootPath(insertNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, colRep, func(pack *segmentFlushPack) {
         fpMut.Lock()
         flushPacks = append(flushPacks, pack)
@@ -922,7 +922,7 @@ func TestInsertBufferNode_bufferInsertMsg(t *testing.T) {
     }
     cm := storage.NewLocalChunkManager(storage.RootPath(insertNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     for _, test := range tests {
         collMeta := Factory.GetCollectionMeta(test.collID, "collection", test.pkType)
         rcf := &RootCoordFactory{
@@ -979,8 +979,10 @@ func TestInsertBufferNode_bufferInsertMsg(t *testing.T) {
 }
 func TestInsertBufferNode_updateSegStatesInReplica(te *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     cm := storage.NewLocalChunkManager(storage.RootPath(insertNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     invalideTests := []struct {
         replicaCollID UniqueID


@@ -567,9 +567,11 @@ type flushBufferInsertTask struct {
 // flushInsertData implements flushInsertTask
 func (t *flushBufferInsertTask) flushInsertData() error {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     if t.ChunkManager != nil && len(t.data) > 0 {
         tr := timerecord.NewTimeRecorder("insertData")
-        err := t.MultiWrite(t.data)
+        err := t.MultiWrite(ctx, t.data)
         metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.InsertLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
         if err == nil {
             for _, d := range t.data {
@@ -588,9 +590,11 @@ type flushBufferDeleteTask struct {
 // flushDeleteData implements flushDeleteTask
 func (t *flushBufferDeleteTask) flushDeleteData() error {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     if len(t.data) > 0 && t.ChunkManager != nil {
         tr := timerecord.NewTimeRecorder("deleteData")
-        err := t.MultiWrite(t.data)
+        err := t.MultiWrite(ctx, t.data)
         metrics.DataNodeSave2StorageLatency.WithLabelValues(fmt.Sprint(Params.DataNodeCfg.GetNodeID()), metrics.DeleteLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
         if err == nil {
             for _, d := range t.data {


@@ -141,8 +141,10 @@ func TestOrderFlushQueue_Order(t *testing.T) {
 }
 func TestRendezvousFlushManager(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     cm := storage.NewLocalChunkManager(storage.RootPath(flushTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     size := 1000
     var counter atomic.Int64
@@ -178,8 +180,10 @@ func TestRendezvousFlushManager(t *testing.T) {
 }
 func TestRendezvousFlushManager_Inject(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     cm := storage.NewLocalChunkManager(storage.RootPath(flushTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     size := 1000
     var counter atomic.Int64


@@ -377,7 +377,7 @@ func (replica *SegmentReplica) addSegment(req addSegmentReq) error {
         }
     }
     // Set up bloom filter.
-    err := replica.initPKBloomFilter(seg, req.statsBinLogs, req.recoverTs)
+    err := replica.initPKBloomFilter(context.TODO(), seg, req.statsBinLogs, req.recoverTs)
     if err != nil {
         log.Error("failed to init bloom filter",
             zap.Int64("segment ID", req.segID),
@@ -451,7 +451,7 @@ func (replica *SegmentReplica) filterSegments(channelName string, partitionID Un
     return results
 }
-func (replica *SegmentReplica) initPKBloomFilter(s *Segment, statsBinlogs []*datapb.FieldBinlog, ts Timestamp) error {
+func (replica *SegmentReplica) initPKBloomFilter(ctx context.Context, s *Segment, statsBinlogs []*datapb.FieldBinlog, ts Timestamp) error {
     log := log.With(zap.Int64("segmentID", s.segmentID))
     log.Info("begin to init pk bloom filter", zap.Int("stats bin logs", len(statsBinlogs)))
     schema, err := replica.getCollectionSchema(s.collectionID, ts)
@@ -486,7 +486,7 @@ func (replica *SegmentReplica) initPKBloomFilter(s *Segment, statsBinlogs []*dat
         return replica.initSegmentBloomFilter(s)
     }
-    values, err := replica.chunkManager.MultiRead(bloomFilterFiles)
+    values, err := replica.chunkManager.MultiRead(ctx, bloomFilterFiles)
     if err != nil {
         log.Warn("failed to load bloom filter files", zap.Error(err))
         return err


@@ -42,7 +42,7 @@ var segmentReplicaNodeTestDir = "/tmp/milvus_test/segment_replica"
 func TestNewReplica(t *testing.T) {
     rc := &RootCoordFactory{}
     cm := storage.NewLocalChunkManager(storage.RootPath(segmentReplicaNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(context.Background(), "")
     replica, err := newReplica(context.Background(), rc, cm, 0)
     assert.Nil(t, err)
     assert.NotNil(t, replica)
@@ -52,7 +52,7 @@ type mockDataCM struct {
     storage.ChunkManager
 }
-func (kv *mockDataCM) MultiRead(keys []string) ([][]byte, error) {
+func (kv *mockDataCM) MultiRead(ctx context.Context, keys []string) ([][]byte, error) {
     stats := &storage.PrimaryKeyStats{
         FieldID: common.RowIDField,
         Min:     0,
@@ -67,7 +67,7 @@ type mockPkfilterMergeError struct {
     storage.ChunkManager
 }
-func (kv *mockPkfilterMergeError) MultiRead(keys []string) ([][]byte, error) {
+func (kv *mockPkfilterMergeError) MultiRead(ctx context.Context, keys []string) ([][]byte, error) {
     /*
         stats := &storage.PrimaryKeyStats{
@@ -84,7 +84,7 @@ type mockDataCMError struct {
     storage.ChunkManager
 }
-func (kv *mockDataCMError) MultiRead(keys []string) ([][]byte, error) {
+func (kv *mockDataCMError) MultiRead(ctx context.Context, keys []string) ([][]byte, error) {
     return nil, fmt.Errorf("mock error")
 }
@@ -92,7 +92,7 @@ type mockDataCMStatsError struct {
     storage.ChunkManager
 }
-func (kv *mockDataCMStatsError) MultiRead(keys []string) ([][]byte, error) {
+func (kv *mockDataCMStatsError) MultiRead(ctx context.Context, keys []string) ([][]byte, error) {
     return [][]byte{[]byte("3123123,error,test")}, nil
 }
@@ -226,12 +226,14 @@ func TestSegmentReplica_getCollectionAndPartitionID(te *testing.T) {
 }
 func TestSegmentReplica(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     rc := &RootCoordFactory{
         pkType: schemapb.DataType_Int64,
     }
     collID := UniqueID(1)
     cm := storage.NewLocalChunkManager(storage.RootPath(segmentReplicaNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     t.Run("Test coll mot match", func(t *testing.T) {
         replica, err := newReplica(context.Background(), rc, cm, collID)
@@ -318,11 +320,13 @@ func TestSegmentReplica(t *testing.T) {
 }
 func TestSegmentReplica_InterfaceMethod(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     rc := &RootCoordFactory{
         pkType: schemapb.DataType_Int64,
     }
     cm := storage.NewLocalChunkManager(storage.RootPath(segmentReplicaNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     t.Run("Test addFlushedSegmentWithPKs", func(t *testing.T) {
         tests := []struct {
@@ -882,12 +886,14 @@ func TestSegmentReplica_InterfaceMethod(t *testing.T) {
 }
 func TestInnerFunctionSegment(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     rc := &RootCoordFactory{
         pkType: schemapb.DataType_Int64,
     }
     collID := UniqueID(1)
     cm := storage.NewLocalChunkManager(storage.RootPath(segmentReplicaNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     replica, err := newReplica(context.Background(), rc, cm, collID)
     assert.Nil(t, err)
     replica.chunkManager = &mockDataCM{}
@@ -1093,6 +1099,8 @@ func TestSegment_getSegmentStatslog(t *testing.T) {
 }
 func TestReplica_UpdatePKRange(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
     rc := &RootCoordFactory{
         pkType: schemapb.DataType_Int64,
     }
@@ -1105,7 +1113,7 @@ func TestReplica_UpdatePKRange(t *testing.T) {
     cp := &segmentCheckPoint{int64(10), *cpPos}
     cm := storage.NewLocalChunkManager(storage.RootPath(segmentReplicaNodeTestDir))
-    defer cm.RemoveWithPrefix("")
+    defer cm.RemoveWithPrefix(ctx, "")
     replica, err := newReplica(context.Background(), rc, cm, collID)
     assert.Nil(t, err)
     replica.chunkManager = &mockDataCM{}
@@ -1187,8 +1195,7 @@ func (s *SegmentReplicaSuite) SetupSuite() {
 }
 func (s *SegmentReplicaSuite) TearDownSuite() {
-    s.cm.RemoveWithPrefix("")
+    s.cm.RemoveWithPrefix(context.Background(), "")
 }
 func (s *SegmentReplicaSuite) SetupTest() {


@@ -215,7 +215,7 @@ func (gc *garbageCollector) recycleUnusedIndexFiles() {
         case <-ticker.C:
             prefix := path.Join(gc.chunkManager.RootPath(), common.SegmentIndexPath) + "/"
             // list dir first
-            keys, _, err := gc.chunkManager.ListWithPrefix(prefix, false)
+            keys, _, err := gc.chunkManager.ListWithPrefix(gc.ctx, prefix, false)
             if err != nil {
                 log.Ctx(gc.ctx).Error("IndexCoord garbageCollector recycleUnusedIndexFiles list keys from chunk manager failed", zap.Error(err))
                 continue
@@ -232,7 +232,7 @@ func (gc *garbageCollector) recycleUnusedIndexFiles() {
                 // buildID no longer exists in meta, remove all index files
                 log.Ctx(gc.ctx).Info("IndexCoord garbageCollector recycleUnusedIndexFiles find meta has not exist, remove index files",
                     zap.Int64("buildID", buildID))
-                err = gc.chunkManager.RemoveWithPrefix(key)
+                err = gc.chunkManager.RemoveWithPrefix(gc.ctx, key)
                 if err != nil {
                     log.Ctx(gc.ctx).Warn("IndexCoord garbageCollector recycleUnusedIndexFiles remove index files failed",
                         zap.Int64("buildID", buildID), zap.String("prefix", key), zap.Error(err))
@@ -252,7 +252,7 @@ func (gc *garbageCollector) recycleUnusedIndexFiles() {
                 for _, file := range indexFilePaths {
                     filesMap[file] = true
                 }
-                files, _, err := gc.chunkManager.ListWithPrefix(key, true)
+                files, _, err := gc.chunkManager.ListWithPrefix(gc.ctx, key, true)
                 if err != nil {
                     log.Ctx(gc.ctx).Warn("IndexCoord garbageCollector recycleUnusedIndexFiles list files failed",
                         zap.Int64("buildID", buildID), zap.String("prefix", key), zap.Error(err))
@@ -263,7 +263,7 @@ func (gc *garbageCollector) recycleUnusedIndexFiles() {
                 deletedFilesNum := 0
                 for _, file := range files {
                     if _, ok := filesMap[file]; !ok {
-                        if err = gc.chunkManager.Remove(file); err != nil {
+                        if err = gc.chunkManager.Remove(gc.ctx, file); err != nil {
                             log.Ctx(gc.ctx).Warn("IndexCoord garbageCollector recycleUnusedIndexFiles remove file failed",
                                 zap.Int64("buildID", buildID), zap.String("file", file), zap.Error(err))
                             continue


@@ -575,14 +575,14 @@ func (cmm *chunkManagerMock) RootPath() string {
     return ""
 }
-func (cmm *chunkManagerMock) RemoveWithPrefix(prefix string) error {
+func (cmm *chunkManagerMock) RemoveWithPrefix(ctx context.Context, prefix string) error {
     return cmm.removeWithPrefix(prefix)
 }
-func (cmm *chunkManagerMock) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) {
+func (cmm *chunkManagerMock) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
     return cmm.listWithPrefix(prefix, recursive)
 }
-func (cmm *chunkManagerMock) Remove(key string) error {
+func (cmm *chunkManagerMock) Remove(ctx context.Context, key string) error {
     return cmm.remove(key)
 }


@@ -95,32 +95,32 @@ func (c *mockChunkmgr) RootPath() string {
     return ""
 }
-func (c *mockChunkmgr) Path(filePath string) (string, error) {
+func (c *mockChunkmgr) Path(ctx context.Context, filePath string) (string, error) {
     // TODO
     return filePath, errNotImplErr
 }
-func (c *mockChunkmgr) Size(filePath string) (int64, error) {
+func (c *mockChunkmgr) Size(ctx context.Context, filePath string) (int64, error) {
     // TODO
     return 0, errNotImplErr
 }
-func (c *mockChunkmgr) Write(filePath string, content []byte) error {
+func (c *mockChunkmgr) Write(ctx context.Context, filePath string, content []byte) error {
     c.indexedData.Store(filePath, content)
     return nil
 }
-func (c *mockChunkmgr) MultiWrite(contents map[string][]byte) error {
+func (c *mockChunkmgr) MultiWrite(ctx context.Context, contents map[string][]byte) error {
     // TODO
     return errNotImplErr
 }
-func (c *mockChunkmgr) Exist(filePath string) (bool, error) {
+func (c *mockChunkmgr) Exist(ctx context.Context, filePath string) (bool, error) {
     // TODO
     return false, errNotImplErr
 }
-func (c *mockChunkmgr) Read(filePath string) ([]byte, error) {
+func (c *mockChunkmgr) Read(ctx context.Context, filePath string) ([]byte, error) {
     value, ok := c.segmentData.Load(filePath)
     if !ok {
         return nil, fmt.Errorf("data not exists")
@@ -128,47 +128,47 @@ func (c *mockChunkmgr) Read(filePath string) ([]byte, error) {
     return value.(*storage.Blob).Value, nil
 }
-func (c *mockChunkmgr) Reader(filePath string) (storage.FileReader, error) {
+func (c *mockChunkmgr) Reader(ctx context.Context, filePath string) (storage.FileReader, error) {
     // TODO
     return nil, errNotImplErr
 }
-func (c *mockChunkmgr) MultiRead(filePaths []string) ([][]byte, error) {
+func (c *mockChunkmgr) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error) {
     // TODO
     return nil, errNotImplErr
 }
-func (c *mockChunkmgr) ReadWithPrefix(prefix string) ([]string, [][]byte, error) {
+func (c *mockChunkmgr) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
     // TODO
     return nil, nil, errNotImplErr
 }
-func (c *mockChunkmgr) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) {
+func (c *mockChunkmgr) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
     // TODO
     return nil, nil, errNotImplErr
 }
-func (c *mockChunkmgr) Mmap(filePath string) (*mmap.ReaderAt, error) {
+func (c *mockChunkmgr) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error) {
     // TODO
     return nil, errNotImplErr
 }
-func (c *mockChunkmgr) ReadAt(filePath string, off int64, length int64) ([]byte, error) {
+func (c *mockChunkmgr) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {
     // TODO
     return nil, errNotImplErr
 }
-func (c *mockChunkmgr) Remove(filePath string) error {
+func (c *mockChunkmgr) Remove(ctx context.Context, filePath string) error {
     // TODO
     return errNotImplErr
 }
-func (c *mockChunkmgr) MultiRemove(filePaths []string) error {
+func (c *mockChunkmgr) MultiRemove(ctx context.Context, filePaths []string) error {
     // TODO
     return errNotImplErr
 }
-func (c *mockChunkmgr) RemoveWithPrefix(prefix string) error {
+func (c *mockChunkmgr) RemoveWithPrefix(ctx context.Context, prefix string) error {
     // TODO
     return errNotImplErr
 }


@@ -216,7 +216,7 @@ func (it *indexBuildTask) Prepare(ctx context.Context) error {
 func (it *indexBuildTask) LoadData(ctx context.Context) error {
     getValueByPath := func(path string) ([]byte, error) {
-        data, err := it.cm.Read(path)
+        data, err := it.cm.Read(ctx, path)
         if err != nil {
             if errors.Is(err, ErrNoSuchKey) {
                 return nil, ErrNoSuchKey
@@ -444,7 +444,7 @@ func (it *indexBuildTask) SaveIndexFiles(ctx context.Context) error {
         blob := it.indexBlobs[idx]
         savePath := getSavePathByKey(blob.Key)
         saveFn := func() error {
-            return it.cm.Write(savePath, blob.Value)
+            return it.cm.Write(ctx, savePath, blob.Value)
         }
         if err := retry.Do(ctx, saveFn, retry.Attempts(5)); err != nil {
             log.Ctx(ctx).Warn("index node save index file failed", zap.Error(err), zap.String("savePath", savePath))
@@ -502,7 +502,7 @@ func (it *indexBuildTask) SaveDiskAnnIndexFiles(ctx context.Context) error {
     indexParamPath := getSavePathByKey(indexParamBlob.Key)
     saveFn := func() error {
-        return it.cm.Write(ctx, indexParamPath, indexParamBlob.Value)
+        return it.cm.Write(ctx, indexParamPath, indexParamBlob.Value)
     }
     if err := retry.Do(ctx, saveFn, retry.Attempts(5)); err != nil {
         log.Ctx(ctx).Warn("index node save index param file failed", zap.Error(err), zap.String("savePath", indexParamPath))
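In the index node hunks above, the same ctx is passed both to the retried closure and to retry.Do, so a caller-imposed deadline bounds the total retry budget as well as each individual write. A hedged usage sketch; parentCtx and the two-minute deadline are illustrative, not part of this diff:

// Hypothetical caller-side deadline; savePath, blob, and it.cm echo the
// code above.
ctx, cancel := context.WithTimeout(parentCtx, 2*time.Minute)
defer cancel()

saveFn := func() error {
    // Each attempt sees the same ctx, so cancellation or deadline expiry
    // aborts the in-flight write as well as later attempts.
    return it.cm.Write(ctx, savePath, blob.Value)
}
if err := retry.Do(ctx, saveFn, retry.Attempts(5)); err != nil {
    // All attempts failed or ctx expired.
    log.Ctx(ctx).Warn("save failed", zap.Error(err))
}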


@@ -3,9 +3,12 @@
 package mocks
 import (
-    mock "github.com/stretchr/testify/mock"
+    context "context"
     mmap "golang.org/x/exp/mmap"
+    mock "github.com/stretchr/testify/mock"
     storage "github.com/milvus-io/milvus/internal/storage"
     time "time"
@@ -24,20 +27,20 @@ func (_m *ChunkManager) EXPECT() *ChunkManager_Expecter {
     return &ChunkManager_Expecter{mock: &_m.Mock}
 }
-// Exist provides a mock function with given fields: filePath
-func (_m *ChunkManager) Exist(filePath string) (bool, error) {
-    ret := _m.Called(filePath)
+// Exist provides a mock function with given fields: ctx, filePath
+func (_m *ChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {
+    ret := _m.Called(ctx, filePath)
     var r0 bool
-    if rf, ok := ret.Get(0).(func(string) bool); ok {
-        r0 = rf(filePath)
+    if rf, ok := ret.Get(0).(func(context.Context, string) bool); ok {
+        r0 = rf(ctx, filePath)
     } else {
         r0 = ret.Get(0).(bool)
     }
     var r1 error
-    if rf, ok := ret.Get(1).(func(string) error); ok {
-        r1 = rf(filePath)
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, filePath)
     } else {
         r1 = ret.Error(1)
     }
@@ -51,14 +54,15 @@ type ChunkManager_Exist_Call struct {
 }
 // Exist is a helper method to define mock.On call
+// - ctx context.Context
 // - filePath string
-func (_e *ChunkManager_Expecter) Exist(filePath interface{}) *ChunkManager_Exist_Call {
-    return &ChunkManager_Exist_Call{Call: _e.mock.On("Exist", filePath)}
+func (_e *ChunkManager_Expecter) Exist(ctx interface{}, filePath interface{}) *ChunkManager_Exist_Call {
+    return &ChunkManager_Exist_Call{Call: _e.mock.On("Exist", ctx, filePath)}
 }
-func (_c *ChunkManager_Exist_Call) Run(run func(filePath string)) *ChunkManager_Exist_Call {
+func (_c *ChunkManager_Exist_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Exist_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(string))
+        run(args[0].(context.Context), args[1].(string))
     })
     return _c
 }
@@ -68,13 +72,13 @@ func (_c *ChunkManager_Exist_Call) Return(_a0 bool, _a1 error) *ChunkManager_Exi
     return _c
 }
-// ListWithPrefix provides a mock function with given fields: prefix, recursive
-func (_m *ChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) {
-    ret := _m.Called(prefix, recursive)
+// ListWithPrefix provides a mock function with given fields: ctx, prefix, recursive
+func (_m *ChunkManager) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
+    ret := _m.Called(ctx, prefix, recursive)
     var r0 []string
-    if rf, ok := ret.Get(0).(func(string, bool) []string); ok {
-        r0 = rf(prefix, recursive)
+    if rf, ok := ret.Get(0).(func(context.Context, string, bool) []string); ok {
+        r0 = rf(ctx, prefix, recursive)
     } else {
         if ret.Get(0) != nil {
             r0 = ret.Get(0).([]string)
@@ -82,8 +86,8 @@ func (_m *ChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string,
     }
     var r1 []time.Time
-    if rf, ok := ret.Get(1).(func(string, bool) []time.Time); ok {
-        r1 = rf(prefix, recursive)
+    if rf, ok := ret.Get(1).(func(context.Context, string, bool) []time.Time); ok {
+        r1 = rf(ctx, prefix, recursive)
     } else {
         if ret.Get(1) != nil {
            r1 = ret.Get(1).([]time.Time)
@@ -91,8 +95,8 @@ func (_m *ChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string,
     }
     var r2 error
-    if rf, ok := ret.Get(2).(func(string, bool) error); ok {
-        r2 = rf(prefix, recursive)
+    if rf, ok := ret.Get(2).(func(context.Context, string, bool) error); ok {
+        r2 = rf(ctx, prefix, recursive)
     } else {
         r2 = ret.Error(2)
     }
@@ -106,15 +110,16 @@ type ChunkManager_ListWithPrefix_Call struct {
 }
 // ListWithPrefix is a helper method to define mock.On call
+// - ctx context.Context
 // - prefix string
 // - recursive bool
-func (_e *ChunkManager_Expecter) ListWithPrefix(prefix interface{}, recursive interface{}) *ChunkManager_ListWithPrefix_Call {
-    return &ChunkManager_ListWithPrefix_Call{Call: _e.mock.On("ListWithPrefix", prefix, recursive)}
+func (_e *ChunkManager_Expecter) ListWithPrefix(ctx interface{}, prefix interface{}, recursive interface{}) *ChunkManager_ListWithPrefix_Call {
+    return &ChunkManager_ListWithPrefix_Call{Call: _e.mock.On("ListWithPrefix", ctx, prefix, recursive)}
 }
-func (_c *ChunkManager_ListWithPrefix_Call) Run(run func(prefix string, recursive bool)) *ChunkManager_ListWithPrefix_Call {
+func (_c *ChunkManager_ListWithPrefix_Call) Run(run func(ctx context.Context, prefix string, recursive bool)) *ChunkManager_ListWithPrefix_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(string), args[1].(bool))
+        run(args[0].(context.Context), args[1].(string), args[2].(bool))
     })
     return _c
 }
@@ -124,13 +129,13 @@ func (_c *ChunkManager_ListWithPrefix_Call) Return(_a0 []string, _a1 []time.Time
     return _c
 }
-// Mmap provides a mock function with given fields: filePath
-func (_m *ChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) {
-    ret := _m.Called(filePath)
+// Mmap provides a mock function with given fields: ctx, filePath
+func (_m *ChunkManager) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error) {
+    ret := _m.Called(ctx, filePath)
     var r0 *mmap.ReaderAt
-    if rf, ok := ret.Get(0).(func(string) *mmap.ReaderAt); ok {
-        r0 = rf(filePath)
+    if rf, ok := ret.Get(0).(func(context.Context, string) *mmap.ReaderAt); ok {
+        r0 = rf(ctx, filePath)
    } else {
         if ret.Get(0) != nil {
             r0 = ret.Get(0).(*mmap.ReaderAt)
@@ -138,8 +143,8 @@ func (_m *ChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) {
     }
     var r1 error
-    if rf, ok := ret.Get(1).(func(string) error); ok {
-        r1 = rf(filePath)
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, filePath)
     } else {
         r1 = ret.Error(1)
     }
@@ -153,14 +158,15 @@ type ChunkManager_Mmap_Call struct {
 }
 // Mmap is a helper method to define mock.On call
+// - ctx context.Context
 // - filePath string
-func (_e *ChunkManager_Expecter) Mmap(filePath interface{}) *ChunkManager_Mmap_Call {
-    return &ChunkManager_Mmap_Call{Call: _e.mock.On("Mmap", filePath)}
+func (_e *ChunkManager_Expecter) Mmap(ctx interface{}, filePath interface{}) *ChunkManager_Mmap_Call {
+    return &ChunkManager_Mmap_Call{Call: _e.mock.On("Mmap", ctx, filePath)}
 }
-func (_c *ChunkManager_Mmap_Call) Run(run func(filePath string)) *ChunkManager_Mmap_Call {
+func (_c *ChunkManager_Mmap_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Mmap_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(string))
+        run(args[0].(context.Context), args[1].(string))
     })
     return _c
 }
@@ -170,13 +176,13 @@ func (_c *ChunkManager_Mmap_Call) Return(_a0 *mmap.ReaderAt, _a1 error) *ChunkMa
     return _c
 }
-// MultiRead provides a mock function with given fields: filePaths
-func (_m *ChunkManager) MultiRead(filePaths []string) ([][]byte, error) {
-    ret := _m.Called(filePaths)
+// MultiRead provides a mock function with given fields: ctx, filePaths
+func (_m *ChunkManager) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error) {
+    ret := _m.Called(ctx, filePaths)
     var r0 [][]byte
-    if rf, ok := ret.Get(0).(func([]string) [][]byte); ok {
-        r0 = rf(filePaths)
+    if rf, ok := ret.Get(0).(func(context.Context, []string) [][]byte); ok {
+        r0 = rf(ctx, filePaths)
     } else {
         if ret.Get(0) != nil {
             r0 = ret.Get(0).([][]byte)
@@ -184,8 +190,8 @@ func (_m *ChunkManager) MultiRead(filePaths []string) ([][]byte, error) {
     }
     var r1 error
-    if rf, ok := ret.Get(1).(func([]string) error); ok {
-        r1 = rf(filePaths)
+    if rf, ok := ret.Get(1).(func(context.Context, []string) error); ok {
+        r1 = rf(ctx, filePaths)
     } else {
         r1 = ret.Error(1)
     }
@@ -199,14 +205,15 @@ type ChunkManager_MultiRead_Call struct {
 }
 // MultiRead is a helper method to define mock.On call
+// - ctx context.Context
 // - filePaths []string
-func (_e *ChunkManager_Expecter) MultiRead(filePaths interface{}) *ChunkManager_MultiRead_Call {
-    return &ChunkManager_MultiRead_Call{Call: _e.mock.On("MultiRead", filePaths)}
+func (_e *ChunkManager_Expecter) MultiRead(ctx interface{}, filePaths interface{}) *ChunkManager_MultiRead_Call {
+    return &ChunkManager_MultiRead_Call{Call: _e.mock.On("MultiRead", ctx, filePaths)}
 }
-func (_c *ChunkManager_MultiRead_Call) Run(run func(filePaths []string)) *ChunkManager_MultiRead_Call {
+func (_c *ChunkManager_MultiRead_Call) Run(run func(ctx context.Context, filePaths []string)) *ChunkManager_MultiRead_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].([]string))
+        run(args[0].(context.Context), args[1].([]string))
     })
     return _c
 }
@@ -216,13 +223,13 @@ func (_c *ChunkManager_MultiRead_Call) Return(_a0 [][]byte, _a1 error) *ChunkMan
     return _c
 }
-// MultiRemove provides a mock function with given fields: filePaths
-func (_m *ChunkManager) MultiRemove(filePaths []string) error {
-    ret := _m.Called(filePaths)
+// MultiRemove provides a mock function with given fields: ctx, filePaths
+func (_m *ChunkManager) MultiRemove(ctx context.Context, filePaths []string) error {
+    ret := _m.Called(ctx, filePaths)
     var r0 error
-    if rf, ok := ret.Get(0).(func([]string) error); ok {
-        r0 = rf(filePaths)
+    if rf, ok := ret.Get(0).(func(context.Context, []string) error); ok {
+        r0 = rf(ctx, filePaths)
     } else {
         r0 = ret.Error(0)
     }
@@ -236,14 +243,15 @@ type ChunkManager_MultiRemove_Call struct {
 }
 // MultiRemove is a helper method to define mock.On call
+// - ctx context.Context
 // - filePaths []string
-func (_e *ChunkManager_Expecter) MultiRemove(filePaths interface{}) *ChunkManager_MultiRemove_Call {
-    return &ChunkManager_MultiRemove_Call{Call: _e.mock.On("MultiRemove", filePaths)}
+func (_e *ChunkManager_Expecter) MultiRemove(ctx interface{}, filePaths interface{}) *ChunkManager_MultiRemove_Call {
+    return &ChunkManager_MultiRemove_Call{Call: _e.mock.On("MultiRemove", ctx, filePaths)}
 }
-func (_c *ChunkManager_MultiRemove_Call) Run(run func(filePaths []string)) *ChunkManager_MultiRemove_Call {
+func (_c *ChunkManager_MultiRemove_Call) Run(run func(ctx context.Context, filePaths []string)) *ChunkManager_MultiRemove_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].([]string))
+        run(args[0].(context.Context), args[1].([]string))
     })
     return _c
 }
@@ -253,13 +261,13 @@ func (_c *ChunkManager_MultiRemove_Call) Return(_a0 error) *ChunkManager_MultiRe
     return _c
 }
-// MultiWrite provides a mock function with given fields: contents
-func (_m *ChunkManager) MultiWrite(contents map[string][]byte) error {
-    ret := _m.Called(contents)
+// MultiWrite provides a mock function with given fields: ctx, contents
+func (_m *ChunkManager) MultiWrite(ctx context.Context, contents map[string][]byte) error {
+    ret := _m.Called(ctx, contents)
     var r0 error
-    if rf, ok := ret.Get(0).(func(map[string][]byte) error); ok {
-        r0 = rf(contents)
+    if rf, ok := ret.Get(0).(func(context.Context, map[string][]byte) error); ok {
+        r0 = rf(ctx, contents)
     } else {
         r0 = ret.Error(0)
     }
@@ -273,14 +281,15 @@ type ChunkManager_MultiWrite_Call struct {
 }
 // MultiWrite is a helper method to define mock.On call
+// - ctx context.Context
 // - contents map[string][]byte
-func (_e *ChunkManager_Expecter) MultiWrite(contents interface{}) *ChunkManager_MultiWrite_Call {
-    return &ChunkManager_MultiWrite_Call{Call: _e.mock.On("MultiWrite", contents)}
+func (_e *ChunkManager_Expecter) MultiWrite(ctx interface{}, contents interface{}) *ChunkManager_MultiWrite_Call {
+    return &ChunkManager_MultiWrite_Call{Call: _e.mock.On("MultiWrite", ctx, contents)}
 }
-func (_c *ChunkManager_MultiWrite_Call) Run(run func(contents map[string][]byte)) *ChunkManager_MultiWrite_Call {
+func (_c *ChunkManager_MultiWrite_Call) Run(run func(ctx context.Context, contents map[string][]byte)) *ChunkManager_MultiWrite_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(map[string][]byte))
+        run(args[0].(context.Context), args[1].(map[string][]byte))
     })
     return _c
 }
@@ -290,20 +299,20 @@ func (_c *ChunkManager_MultiWrite_Call) Return(_a0 error) *ChunkManager_MultiWri
     return _c
 }
-// Path provides a mock function with given fields: filePath
-func (_m *ChunkManager) Path(filePath string) (string, error) {
-    ret := _m.Called(filePath)
+// Path provides a mock function with given fields: ctx, filePath
+func (_m *ChunkManager) Path(ctx context.Context, filePath string) (string, error) {
+    ret := _m.Called(ctx, filePath)
     var r0 string
-    if rf, ok := ret.Get(0).(func(string) string); ok {
-        r0 = rf(filePath)
+    if rf, ok := ret.Get(0).(func(context.Context, string) string); ok {
+        r0 = rf(ctx, filePath)
     } else {
         r0 = ret.Get(0).(string)
     }
     var r1 error
-    if rf, ok := ret.Get(1).(func(string) error); ok {
-        r1 = rf(filePath)
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, filePath)
     } else {
         r1 = ret.Error(1)
     }
@@ -317,14 +326,15 @@ type ChunkManager_Path_Call struct {
 }
 // Path is a helper method to define mock.On call
+// - ctx context.Context
 // - filePath string
-func (_e *ChunkManager_Expecter) Path(filePath interface{}) *ChunkManager_Path_Call {
-    return &ChunkManager_Path_Call{Call: _e.mock.On("Path", filePath)}
+func (_e *ChunkManager_Expecter) Path(ctx interface{}, filePath interface{}) *ChunkManager_Path_Call {
+    return &ChunkManager_Path_Call{Call: _e.mock.On("Path", ctx, filePath)}
 }
-func (_c *ChunkManager_Path_Call) Run(run func(filePath string)) *ChunkManager_Path_Call {
+func (_c *ChunkManager_Path_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Path_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(string))
+        run(args[0].(context.Context), args[1].(string))
     })
     return _c
 }
@@ -334,13 +344,13 @@ func (_c *ChunkManager_Path_Call) Return(_a0 string, _a1 error) *ChunkManager_Pa
     return _c
 }
-// Read provides a mock function with given fields: filePath
-func (_m *ChunkManager) Read(filePath string) ([]byte, error) {
-    ret := _m.Called(filePath)
+// Read provides a mock function with given fields: ctx, filePath
+func (_m *ChunkManager) Read(ctx context.Context, filePath string) ([]byte, error) {
+    ret := _m.Called(ctx, filePath)
     var r0 []byte
-    if rf, ok := ret.Get(0).(func(string) []byte); ok {
-        r0 = rf(filePath)
+    if rf, ok := ret.Get(0).(func(context.Context, string) []byte); ok {
+        r0 = rf(ctx, filePath)
     } else {
         if ret.Get(0) != nil {
             r0 = ret.Get(0).([]byte)
@@ -348,8 +358,8 @@ func (_m *ChunkManager) Read(filePath string) ([]byte, error) {
     }
     var r1 error
-    if rf, ok := ret.Get(1).(func(string) error); ok {
-        r1 = rf(filePath)
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, filePath)
     } else {
         r1 = ret.Error(1)
    }
@@ -363,14 +373,15 @@ type ChunkManager_Read_Call struct {
 }
 // Read is a helper method to define mock.On call
+// - ctx context.Context
 // - filePath string
-func (_e *ChunkManager_Expecter) Read(filePath interface{}) *ChunkManager_Read_Call {
-    return &ChunkManager_Read_Call{Call: _e.mock.On("Read", filePath)}
+func (_e *ChunkManager_Expecter) Read(ctx interface{}, filePath interface{}) *ChunkManager_Read_Call {
+    return &ChunkManager_Read_Call{Call: _e.mock.On("Read", ctx, filePath)}
 }
-func (_c *ChunkManager_Read_Call) Run(run func(filePath string)) *ChunkManager_Read_Call {
+func (_c *ChunkManager_Read_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Read_Call {
     _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(string))
+        run(args[0].(context.Context), args[1].(string))
     })
     return _c
 }
@@ -380,13 +391,13 @@ func (_c *ChunkManager_Read_Call) Return(_a0 []byte, _a1 error) *ChunkManager_Re
return _c return _c
} }
// ReadAt provides a mock function with given fields: filePath, off, length // ReadAt provides a mock function with given fields: ctx, filePath, off, length
func (_m *ChunkManager) ReadAt(filePath string, off int64, length int64) ([]byte, error) { func (_m *ChunkManager) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {
ret := _m.Called(filePath, off, length) ret := _m.Called(ctx, filePath, off, length)
var r0 []byte var r0 []byte
if rf, ok := ret.Get(0).(func(string, int64, int64) []byte); ok { if rf, ok := ret.Get(0).(func(context.Context, string, int64, int64) []byte); ok {
r0 = rf(filePath, off, length) r0 = rf(ctx, filePath, off, length)
} else { } else {
if ret.Get(0) != nil { if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte) r0 = ret.Get(0).([]byte)
@ -394,8 +405,8 @@ func (_m *ChunkManager) ReadAt(filePath string, off int64, length int64) ([]byte
} }
var r1 error var r1 error
if rf, ok := ret.Get(1).(func(string, int64, int64) error); ok { if rf, ok := ret.Get(1).(func(context.Context, string, int64, int64) error); ok {
r1 = rf(filePath, off, length) r1 = rf(ctx, filePath, off, length)
} else { } else {
r1 = ret.Error(1) r1 = ret.Error(1)
} }
@ -409,16 +420,17 @@ type ChunkManager_ReadAt_Call struct {
} }
// ReadAt is a helper method to define mock.On call // ReadAt is a helper method to define mock.On call
// - ctx context.Context
// - filePath string // - filePath string
// - off int64 // - off int64
// - length int64 // - length int64
func (_e *ChunkManager_Expecter) ReadAt(filePath interface{}, off interface{}, length interface{}) *ChunkManager_ReadAt_Call { func (_e *ChunkManager_Expecter) ReadAt(ctx interface{}, filePath interface{}, off interface{}, length interface{}) *ChunkManager_ReadAt_Call {
return &ChunkManager_ReadAt_Call{Call: _e.mock.On("ReadAt", filePath, off, length)} return &ChunkManager_ReadAt_Call{Call: _e.mock.On("ReadAt", ctx, filePath, off, length)}
} }
func (_c *ChunkManager_ReadAt_Call) Run(run func(filePath string, off int64, length int64)) *ChunkManager_ReadAt_Call { func (_c *ChunkManager_ReadAt_Call) Run(run func(ctx context.Context, filePath string, off int64, length int64)) *ChunkManager_ReadAt_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string), args[1].(int64), args[2].(int64)) run(args[0].(context.Context), args[1].(string), args[2].(int64), args[3].(int64))
}) })
return _c return _c
} }
@ -428,13 +440,13 @@ func (_c *ChunkManager_ReadAt_Call) Return(p []byte, err error) *ChunkManager_Re
return _c return _c
} }
// ReadWithPrefix provides a mock function with given fields: prefix // ReadWithPrefix provides a mock function with given fields: ctx, prefix
func (_m *ChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error) { func (_m *ChunkManager) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
ret := _m.Called(prefix) ret := _m.Called(ctx, prefix)
var r0 []string var r0 []string
if rf, ok := ret.Get(0).(func(string) []string); ok { if rf, ok := ret.Get(0).(func(context.Context, string) []string); ok {
r0 = rf(prefix) r0 = rf(ctx, prefix)
} else { } else {
if ret.Get(0) != nil { if ret.Get(0) != nil {
r0 = ret.Get(0).([]string) r0 = ret.Get(0).([]string)
@ -442,8 +454,8 @@ func (_m *ChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error
} }
var r1 [][]byte var r1 [][]byte
if rf, ok := ret.Get(1).(func(string) [][]byte); ok { if rf, ok := ret.Get(1).(func(context.Context, string) [][]byte); ok {
r1 = rf(prefix) r1 = rf(ctx, prefix)
} else { } else {
if ret.Get(1) != nil { if ret.Get(1) != nil {
r1 = ret.Get(1).([][]byte) r1 = ret.Get(1).([][]byte)
@ -451,8 +463,8 @@ func (_m *ChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error
} }
var r2 error var r2 error
if rf, ok := ret.Get(2).(func(string) error); ok { if rf, ok := ret.Get(2).(func(context.Context, string) error); ok {
r2 = rf(prefix) r2 = rf(ctx, prefix)
} else { } else {
r2 = ret.Error(2) r2 = ret.Error(2)
} }
@ -466,14 +478,15 @@ type ChunkManager_ReadWithPrefix_Call struct {
} }
// ReadWithPrefix is a helper method to define mock.On call // ReadWithPrefix is a helper method to define mock.On call
// - ctx context.Context
// - prefix string // - prefix string
func (_e *ChunkManager_Expecter) ReadWithPrefix(prefix interface{}) *ChunkManager_ReadWithPrefix_Call { func (_e *ChunkManager_Expecter) ReadWithPrefix(ctx interface{}, prefix interface{}) *ChunkManager_ReadWithPrefix_Call {
return &ChunkManager_ReadWithPrefix_Call{Call: _e.mock.On("ReadWithPrefix", prefix)} return &ChunkManager_ReadWithPrefix_Call{Call: _e.mock.On("ReadWithPrefix", ctx, prefix)}
} }
func (_c *ChunkManager_ReadWithPrefix_Call) Run(run func(prefix string)) *ChunkManager_ReadWithPrefix_Call { func (_c *ChunkManager_ReadWithPrefix_Call) Run(run func(ctx context.Context, prefix string)) *ChunkManager_ReadWithPrefix_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string)) run(args[0].(context.Context), args[1].(string))
}) })
return _c return _c
} }
@ -483,13 +496,13 @@ func (_c *ChunkManager_ReadWithPrefix_Call) Return(_a0 []string, _a1 [][]byte, _
return _c return _c
} }
// Reader provides a mock function with given fields: filePath // Reader provides a mock function with given fields: ctx, filePath
func (_m *ChunkManager) Reader(filePath string) (storage.FileReader, error) { func (_m *ChunkManager) Reader(ctx context.Context, filePath string) (storage.FileReader, error) {
ret := _m.Called(filePath) ret := _m.Called(ctx, filePath)
var r0 storage.FileReader var r0 storage.FileReader
if rf, ok := ret.Get(0).(func(string) storage.FileReader); ok { if rf, ok := ret.Get(0).(func(context.Context, string) storage.FileReader); ok {
r0 = rf(filePath) r0 = rf(ctx, filePath)
} else { } else {
if ret.Get(0) != nil { if ret.Get(0) != nil {
r0 = ret.Get(0).(storage.FileReader) r0 = ret.Get(0).(storage.FileReader)
@ -497,8 +510,8 @@ func (_m *ChunkManager) Reader(filePath string) (storage.FileReader, error) {
} }
var r1 error var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok { if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(filePath) r1 = rf(ctx, filePath)
} else { } else {
r1 = ret.Error(1) r1 = ret.Error(1)
} }
@ -512,14 +525,15 @@ type ChunkManager_Reader_Call struct {
} }
// Reader is a helper method to define mock.On call // Reader is a helper method to define mock.On call
// - ctx context.Context
// - filePath string // - filePath string
func (_e *ChunkManager_Expecter) Reader(filePath interface{}) *ChunkManager_Reader_Call { func (_e *ChunkManager_Expecter) Reader(ctx interface{}, filePath interface{}) *ChunkManager_Reader_Call {
return &ChunkManager_Reader_Call{Call: _e.mock.On("Reader", filePath)} return &ChunkManager_Reader_Call{Call: _e.mock.On("Reader", ctx, filePath)}
} }
func (_c *ChunkManager_Reader_Call) Run(run func(filePath string)) *ChunkManager_Reader_Call { func (_c *ChunkManager_Reader_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Reader_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string)) run(args[0].(context.Context), args[1].(string))
}) })
return _c return _c
} }
@ -529,13 +543,13 @@ func (_c *ChunkManager_Reader_Call) Return(_a0 storage.FileReader, _a1 error) *C
return _c return _c
} }
// Remove provides a mock function with given fields: filePath // Remove provides a mock function with given fields: ctx, filePath
func (_m *ChunkManager) Remove(filePath string) error { func (_m *ChunkManager) Remove(ctx context.Context, filePath string) error {
ret := _m.Called(filePath) ret := _m.Called(ctx, filePath)
var r0 error var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok { if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(filePath) r0 = rf(ctx, filePath)
} else { } else {
r0 = ret.Error(0) r0 = ret.Error(0)
} }
@ -549,14 +563,15 @@ type ChunkManager_Remove_Call struct {
} }
// Remove is a helper method to define mock.On call // Remove is a helper method to define mock.On call
// - ctx context.Context
// - filePath string // - filePath string
func (_e *ChunkManager_Expecter) Remove(filePath interface{}) *ChunkManager_Remove_Call { func (_e *ChunkManager_Expecter) Remove(ctx interface{}, filePath interface{}) *ChunkManager_Remove_Call {
return &ChunkManager_Remove_Call{Call: _e.mock.On("Remove", filePath)} return &ChunkManager_Remove_Call{Call: _e.mock.On("Remove", ctx, filePath)}
} }
func (_c *ChunkManager_Remove_Call) Run(run func(filePath string)) *ChunkManager_Remove_Call { func (_c *ChunkManager_Remove_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Remove_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string)) run(args[0].(context.Context), args[1].(string))
}) })
return _c return _c
} }
@ -566,13 +581,13 @@ func (_c *ChunkManager_Remove_Call) Return(_a0 error) *ChunkManager_Remove_Call
return _c return _c
} }
// RemoveWithPrefix provides a mock function with given fields: prefix // RemoveWithPrefix provides a mock function with given fields: ctx, prefix
func (_m *ChunkManager) RemoveWithPrefix(prefix string) error { func (_m *ChunkManager) RemoveWithPrefix(ctx context.Context, prefix string) error {
ret := _m.Called(prefix) ret := _m.Called(ctx, prefix)
var r0 error var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok { if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(prefix) r0 = rf(ctx, prefix)
} else { } else {
r0 = ret.Error(0) r0 = ret.Error(0)
} }
@ -586,14 +601,15 @@ type ChunkManager_RemoveWithPrefix_Call struct {
} }
// RemoveWithPrefix is a helper method to define mock.On call // RemoveWithPrefix is a helper method to define mock.On call
// - ctx context.Context
// - prefix string // - prefix string
func (_e *ChunkManager_Expecter) RemoveWithPrefix(prefix interface{}) *ChunkManager_RemoveWithPrefix_Call { func (_e *ChunkManager_Expecter) RemoveWithPrefix(ctx interface{}, prefix interface{}) *ChunkManager_RemoveWithPrefix_Call {
return &ChunkManager_RemoveWithPrefix_Call{Call: _e.mock.On("RemoveWithPrefix", prefix)} return &ChunkManager_RemoveWithPrefix_Call{Call: _e.mock.On("RemoveWithPrefix", ctx, prefix)}
} }
func (_c *ChunkManager_RemoveWithPrefix_Call) Run(run func(prefix string)) *ChunkManager_RemoveWithPrefix_Call { func (_c *ChunkManager_RemoveWithPrefix_Call) Run(run func(ctx context.Context, prefix string)) *ChunkManager_RemoveWithPrefix_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string)) run(args[0].(context.Context), args[1].(string))
}) })
return _c return _c
} }
@ -639,20 +655,20 @@ func (_c *ChunkManager_RootPath_Call) Return(_a0 string) *ChunkManager_RootPath_
return _c return _c
} }
// Size provides a mock function with given fields: filePath // Size provides a mock function with given fields: ctx, filePath
func (_m *ChunkManager) Size(filePath string) (int64, error) { func (_m *ChunkManager) Size(ctx context.Context, filePath string) (int64, error) {
ret := _m.Called(filePath) ret := _m.Called(ctx, filePath)
var r0 int64 var r0 int64
if rf, ok := ret.Get(0).(func(string) int64); ok { if rf, ok := ret.Get(0).(func(context.Context, string) int64); ok {
r0 = rf(filePath) r0 = rf(ctx, filePath)
} else { } else {
r0 = ret.Get(0).(int64) r0 = ret.Get(0).(int64)
} }
var r1 error var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok { if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(filePath) r1 = rf(ctx, filePath)
} else { } else {
r1 = ret.Error(1) r1 = ret.Error(1)
} }
@ -666,14 +682,15 @@ type ChunkManager_Size_Call struct {
} }
// Size is a helper method to define mock.On call // Size is a helper method to define mock.On call
// - ctx context.Context
// - filePath string // - filePath string
func (_e *ChunkManager_Expecter) Size(filePath interface{}) *ChunkManager_Size_Call { func (_e *ChunkManager_Expecter) Size(ctx interface{}, filePath interface{}) *ChunkManager_Size_Call {
return &ChunkManager_Size_Call{Call: _e.mock.On("Size", filePath)} return &ChunkManager_Size_Call{Call: _e.mock.On("Size", ctx, filePath)}
} }
func (_c *ChunkManager_Size_Call) Run(run func(filePath string)) *ChunkManager_Size_Call { func (_c *ChunkManager_Size_Call) Run(run func(ctx context.Context, filePath string)) *ChunkManager_Size_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string)) run(args[0].(context.Context), args[1].(string))
}) })
return _c return _c
} }
@ -683,13 +700,13 @@ func (_c *ChunkManager_Size_Call) Return(_a0 int64, _a1 error) *ChunkManager_Siz
return _c return _c
} }
// Write provides a mock function with given fields: filePath, content // Write provides a mock function with given fields: ctx, filePath, content
func (_m *ChunkManager) Write(filePath string, content []byte) error { func (_m *ChunkManager) Write(ctx context.Context, filePath string, content []byte) error {
ret := _m.Called(filePath, content) ret := _m.Called(ctx, filePath, content)
var r0 error var r0 error
if rf, ok := ret.Get(0).(func(string, []byte) error); ok { if rf, ok := ret.Get(0).(func(context.Context, string, []byte) error); ok {
r0 = rf(filePath, content) r0 = rf(ctx, filePath, content)
} else { } else {
r0 = ret.Error(0) r0 = ret.Error(0)
} }
@ -703,15 +720,16 @@ type ChunkManager_Write_Call struct {
} }
// Write is a helper method to define mock.On call // Write is a helper method to define mock.On call
// - ctx context.Context
// - filePath string // - filePath string
// - content []byte // - content []byte
func (_e *ChunkManager_Expecter) Write(filePath interface{}, content interface{}) *ChunkManager_Write_Call { func (_e *ChunkManager_Expecter) Write(ctx interface{}, filePath interface{}, content interface{}) *ChunkManager_Write_Call {
return &ChunkManager_Write_Call{Call: _e.mock.On("Write", filePath, content)} return &ChunkManager_Write_Call{Call: _e.mock.On("Write", ctx, filePath, content)}
} }
func (_c *ChunkManager_Write_Call) Run(run func(filePath string, content []byte)) *ChunkManager_Write_Call { func (_c *ChunkManager_Write_Call) Run(run func(ctx context.Context, filePath string, content []byte)) *ChunkManager_Write_Call {
_c.Call.Run(func(args mock.Arguments) { _c.Call.Run(func(args mock.Arguments) {
run(args[0].(string), args[1].([]byte)) run(args[0].(context.Context), args[1].(string), args[2].([]byte))
}) })
return _c return _c
} }
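
Taken together, the regenerated expecter means every test must now match on the leading context argument. A minimal sketch of the updated call pattern, assuming the generated mock lives under github.com/milvus-io/milvus/internal/mocks; the test name and object keys are illustrative:

package mocks_test

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	"github.com/milvus-io/milvus/internal/mocks"
)

// Every expectation takes a matcher for the new ctx parameter first;
// mock.Anything is the usual choice when the test does not care which
// context the code under test passes in.
func TestChunkManagerCtxExpectations(t *testing.T) {
	cm := &mocks.ChunkManager{}
	cm.EXPECT().Read(mock.Anything, mock.AnythingOfType("string")).
		Return([]byte("payload"), nil)
	cm.EXPECT().Remove(mock.Anything, "gone").
		Return(errors.New("mocked"))

	data, err := cm.Read(context.Background(), "some/key")
	assert.NoError(t, err)
	assert.Equal(t, []byte("payload"), data)
	assert.Error(t, cm.Remove(context.Background(), "gone"))
}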


@ -252,6 +252,8 @@ func genSimpleIndexParams() indexParam {
} }
func generateIndex(indexBuildID UniqueID, cm storage.ChunkManager) ([]string, error) { func generateIndex(indexBuildID UniqueID, cm storage.ChunkManager) ([]string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
indexParams := genSimpleIndexParams() indexParams := genSimpleIndexParams()
var indexParamsKV []*commonpb.KeyValuePair var indexParamsKV []*commonpb.KeyValuePair
@ -298,7 +300,7 @@ func generateIndex(indexBuildID UniqueID, cm storage.ChunkManager) ([]string, er
for _, index := range serializedIndexBlobs { for _, index := range serializedIndexBlobs {
p := strconv.Itoa(int(indexBuildID)) + "/" + index.Key p := strconv.Itoa(int(indexBuildID)) + "/" + index.Key
indexPaths = append(indexPaths, p) indexPaths = append(indexPaths, p)
err := cm.Write(p, index.Value) err := cm.Write(ctx, p, index.Value)
if err != nil { if err != nil {
return nil, err return nil, err
} }
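
The helper now derives a cancelable context once and hands it to every cm.Write call, so an in-flight storage operation can be aborted when the caller gives up. A self-contained sketch of that propagation, with slowWrite as a hypothetical stand-in for a remote object-store write:

package main

import (
	"context"
	"fmt"
	"time"
)

// slowWrite is a hypothetical stand-in for a remote object-store write;
// it aborts as soon as the caller's context is canceled.
func slowWrite(ctx context.Context, key string) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel up front to demonstrate propagation
	fmt.Println(slowWrite(ctx, "indexBuildID/key")) // prints: context canceled
}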


@ -177,7 +177,7 @@ func (broker *globalMetaBroker) loadIndexExtraInfo(ctx context.Context, fieldPat
for _, indexFilePath := range fieldPathInfo.IndexFilePaths { for _, indexFilePath := range fieldPathInfo.IndexFilePaths {
// get index params when detecting indexParamPrefix // get index params when detecting indexParamPrefix
if path.Base(indexFilePath) == storage.IndexParamsKey { if path.Base(indexFilePath) == storage.IndexParamsKey {
content, err := broker.cm.MultiRead([]string{indexFilePath}) content, err := broker.cm.MultiRead(ctx, []string{indexFilePath})
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -79,7 +79,7 @@ func TestGlobalMetaBroker_DataCoord(t *testing.T) {
dataCoord := newDataCoordMock(ctx) dataCoord := newDataCoordMock(ctx)
cm := storage.NewLocalChunkManager(storage.RootPath(globalMetaTestDir)) cm := storage.NewLocalChunkManager(storage.RootPath(globalMetaTestDir))
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
handler, err := newGlobalMetaBroker(ctx, nil, dataCoord, nil, cm) handler, err := newGlobalMetaBroker(ctx, nil, dataCoord, nil, cm)
assert.Nil(t, err) assert.Nil(t, err)
@ -166,7 +166,7 @@ func TestGetDataSegmentInfosByIDs(t *testing.T) {
dataCoord := newDataCoordMock(ctx) dataCoord := newDataCoordMock(ctx)
cm := storage.NewLocalChunkManager(storage.RootPath(globalMetaTestDir)) cm := storage.NewLocalChunkManager(storage.RootPath(globalMetaTestDir))
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
handler, err := newGlobalMetaBroker(ctx, nil, dataCoord, nil, cm) handler, err := newGlobalMetaBroker(ctx, nil, dataCoord, nil, cm)
assert.Nil(t, err) assert.Nil(t, err)


@ -193,7 +193,7 @@ func TestHandoff(t *testing.T) {
dataCoord := newDataCoordMock(ctx) dataCoord := newDataCoordMock(ctx)
rootCoord.enableIndex = true rootCoord.enableIndex = true
cm := storage.NewLocalChunkManager(storage.RootPath(handoffHandlerTestDir)) cm := storage.NewLocalChunkManager(storage.RootPath(handoffHandlerTestDir))
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
broker, err := newGlobalMetaBroker(ctx, rootCoord, dataCoord, indexCoord, cm) broker, err := newGlobalMetaBroker(ctx, rootCoord, dataCoord, indexCoord, cm)
assert.Nil(t, err) assert.Nil(t, err)


@ -345,7 +345,7 @@ func loadIndexForSegment(ctx context.Context, node *QueryNode, segmentID UniqueI
}, },
} }
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
if err != nil { if err != nil {
return err return err
} }
@ -415,7 +415,7 @@ func generateAndSaveIndex(segmentID UniqueID, msgLength int, indexType, metricTy
for _, index := range serializedIndexBlobs { for _, index := range serializedIndexBlobs {
p := strconv.Itoa(int(segmentID)) + "/" + index.Key p := strconv.Itoa(int(segmentID)) + "/" + index.Key
indexPaths = append(indexPaths, p) indexPaths = append(indexPaths, p)
err := cm.Write(p, index.Value) err := cm.Write(context.Background(), p, index.Value)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -615,7 +615,7 @@ func genVectorChunkManager(ctx context.Context, col *Collection) (*storage.Vecto
return nil, err return nil, err
} }
vcm, err := storage.NewVectorChunkManager(lcm, rcm, &etcdpb.CollectionMeta{ vcm, err := storage.NewVectorChunkManager(ctx, lcm, rcm, &etcdpb.CollectionMeta{
ID: col.id, ID: col.id,
Schema: col.schema, Schema: col.schema,
}, Params.QueryNodeCfg.CacheMemoryLimit, false) }, Params.QueryNodeCfg.CacheMemoryLimit, false)
@ -1059,7 +1059,7 @@ func saveBinLog(ctx context.Context,
log.Debug("[query node unittest] save statsLog file to MinIO/S3") log.Debug("[query node unittest] save statsLog file to MinIO/S3")
cm := storage.NewLocalChunkManager(storage.RootPath(defaultLocalStorage)) cm := storage.NewLocalChunkManager(storage.RootPath(defaultLocalStorage))
err = cm.MultiWrite(kvs) err = cm.MultiWrite(ctx, kvs)
return fieldBinlog, statsBinlog, err return fieldBinlog, statsBinlog, err
} }
@ -1107,7 +1107,7 @@ func saveDeltaLog(collectionID UniqueID,
}) })
log.Debug("[query node unittest] save delta log file to MinIO/S3") log.Debug("[query node unittest] save delta log file to MinIO/S3")
return fieldBinlog, storage.NewLocalChunkManager(storage.RootPath(defaultLocalStorage)).MultiWrite(kvs) return fieldBinlog, storage.NewLocalChunkManager(storage.RootPath(defaultLocalStorage)).MultiWrite(context.Background(), kvs)
} }
func genSimpleTimestampFieldData(numRows int) []Timestamp { func genSimpleTimestampFieldData(numRows int) []Timestamp {
@ -2034,14 +2034,14 @@ func newMockChunkManager(opts ...mockChunkManagerOpt) storage.ChunkManager {
return ret return ret
} }
func (m *mockChunkManager) ReadAt(path string, offset int64, length int64) ([]byte, error) { func (m *mockChunkManager) ReadAt(ctx context.Context, path string, offset int64, length int64) ([]byte, error) {
if m.readAt != nil { if m.readAt != nil {
return m.readAt(path, offset, length) return m.readAt(path, offset, length)
} }
return defaultReadAt(path, offset, length) return defaultReadAt(path, offset, length)
} }
func (m *mockChunkManager) Read(path string) ([]byte, error) { func (m *mockChunkManager) Read(ctx context.Context, path string) ([]byte, error) {
if m.read != nil { if m.read != nil {
return m.read(path) return m.read(path)
} }


@ -71,7 +71,7 @@ func newQueryShard(
if remoteChunkManager == nil { if remoteChunkManager == nil {
return nil, fmt.Errorf("can not create vector chunk manager for remote chunk manager is nil") return nil, fmt.Errorf("can not create vector chunk manager for remote chunk manager is nil")
} }
vectorChunkManager, err := storage.NewVectorChunkManager(localChunkManager, remoteChunkManager, vectorChunkManager, err := storage.NewVectorChunkManager(ctx, localChunkManager, remoteChunkManager,
&etcdpb.CollectionMeta{ &etcdpb.CollectionMeta{
ID: collectionID, ID: collectionID,
Schema: collection.schema, Schema: collection.schema,


@ -25,7 +25,7 @@ import (
// retrieveOnSegments performs retrieval on the listed segments // retrieveOnSegments performs retrieval on the listed segments
// all segment ids are validated before calling this function // all segment ids are validated before calling this function
func retrieveOnSegments(replica ReplicaInterface, segType segmentType, collID UniqueID, plan *RetrievePlan, segIDs []UniqueID, vcm storage.ChunkManager) ([]*segcorepb.RetrieveResults, error) { func retrieveOnSegments(ctx context.Context, replica ReplicaInterface, segType segmentType, collID UniqueID, plan *RetrievePlan, segIDs []UniqueID, vcm storage.ChunkManager) ([]*segcorepb.RetrieveResults, error) {
var retrieveResults []*segcorepb.RetrieveResults var retrieveResults []*segcorepb.RetrieveResults
for _, segID := range segIDs { for _, segID := range segIDs {
@ -37,7 +37,7 @@ func retrieveOnSegments(replica ReplicaInterface, segType segmentType, collID Un
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := seg.fillIndexedFieldsData(collID, vcm, result); err != nil { if err := seg.fillIndexedFieldsData(ctx, collID, vcm, result); err != nil {
return nil, err return nil, err
} }
retrieveResults = append(retrieveResults, result) retrieveResults = append(retrieveResults, result)
@ -56,7 +56,7 @@ func retrieveHistorical(ctx context.Context, replica ReplicaInterface, plan *Ret
return retrieveResults, retrieveSegmentIDs, retrievePartIDs, err return retrieveResults, retrieveSegmentIDs, retrievePartIDs, err
} }
retrieveResults, err = retrieveOnSegments(replica, segmentTypeSealed, collID, plan, retrieveSegmentIDs, vcm) retrieveResults, err = retrieveOnSegments(ctx, replica, segmentTypeSealed, collID, plan, retrieveSegmentIDs, vcm)
return retrieveResults, retrievePartIDs, retrieveSegmentIDs, err return retrieveResults, retrievePartIDs, retrieveSegmentIDs, err
} }
@ -71,6 +71,6 @@ func retrieveStreaming(ctx context.Context, replica ReplicaInterface, plan *Retr
if err != nil { if err != nil {
return retrieveResults, retrieveSegmentIDs, retrievePartIDs, err return retrieveResults, retrieveSegmentIDs, retrievePartIDs, err
} }
retrieveResults, err = retrieveOnSegments(replica, segmentTypeGrowing, collID, plan, retrieveSegmentIDs, vcm) retrieveResults, err = retrieveOnSegments(ctx, replica, segmentTypeGrowing, collID, plan, retrieveSegmentIDs, vcm)
return retrieveResults, retrievePartIDs, retrieveSegmentIDs, err return retrieveResults, retrievePartIDs, retrieveSegmentIDs, err
} }


@ -26,6 +26,7 @@ package querynode
import "C" import "C"
import ( import (
"bytes" "bytes"
"context"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -407,10 +408,10 @@ func (s *Segment) getFieldDataPath(indexedFieldInfo *IndexedFieldInfo, offset in
return dataPath, offsetInBinlog return dataPath, offsetInBinlog
} }
func fillBinVecFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillBinVecFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
dim := fieldData.GetVectors().GetDim() dim := fieldData.GetVectors().GetDim()
rowBytes := dim / 8 rowBytes := dim / 8
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
@ -420,10 +421,10 @@ func fillBinVecFieldData(vcm storage.ChunkManager, dataPath string, fieldData *s
return nil return nil
} }
func fillFloatVecFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillFloatVecFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
dim := fieldData.GetVectors().GetDim() dim := fieldData.GetVectors().GetDim()
rowBytes := dim * 4 rowBytes := dim * 4
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
@ -438,10 +439,10 @@ func fillFloatVecFieldData(vcm storage.ChunkManager, dataPath string, fieldData
return nil return nil
} }
func fillBoolFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillBoolFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read whole file. // read whole file.
// TODO: optimize here. // TODO: optimize here.
content, err := vcm.Read(dataPath) content, err := vcm.Read(ctx, dataPath)
if err != nil { if err != nil {
return err return err
} }
@ -454,10 +455,10 @@ func fillBoolFieldData(vcm storage.ChunkManager, dataPath string, fieldData *sch
return nil return nil
} }
func fillStringFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillStringFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read whole file. // read whole file.
// TODO: optimize here. // TODO: optimize here.
content, err := vcm.Read(dataPath) content, err := vcm.Read(ctx, dataPath)
if err != nil { if err != nil {
return err return err
} }
@ -470,10 +471,10 @@ func fillStringFieldData(vcm storage.ChunkManager, dataPath string, fieldData *s
return nil return nil
} }
func fillInt8FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillInt8FieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset. // read by offset.
rowBytes := int64(1) rowBytes := int64(1)
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
@ -485,10 +486,10 @@ func fillInt8FieldData(vcm storage.ChunkManager, dataPath string, fieldData *sch
return nil return nil
} }
func fillInt16FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillInt16FieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset. // read by offset.
rowBytes := int64(2) rowBytes := int64(2)
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
@ -500,74 +501,74 @@ func fillInt16FieldData(vcm storage.ChunkManager, dataPath string, fieldData *sc
return nil return nil
} }
func fillInt32FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillInt32FieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset. // read by offset.
rowBytes := int64(4) rowBytes := int64(4)
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetIntData().GetData()[i])) return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetIntData().GetData()[i]))
} }
func fillInt64FieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillInt64FieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset. // read by offset.
rowBytes := int64(8) rowBytes := int64(8)
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetLongData().GetData()[i])) return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetLongData().GetData()[i]))
} }
func fillFloatFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillFloatFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset. // read by offset.
rowBytes := int64(4) rowBytes := int64(4)
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetFloatData().GetData()[i])) return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetFloatData().GetData()[i]))
} }
func fillDoubleFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillDoubleFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
// read by offset. // read by offset.
rowBytes := int64(8) rowBytes := int64(8)
content, err := vcm.ReadAt(dataPath, offset*rowBytes, rowBytes) content, err := vcm.ReadAt(ctx, dataPath, offset*rowBytes, rowBytes)
if err != nil { if err != nil {
return err return err
} }
return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetDoubleData().GetData()[i])) return funcutil.ReadBinary(endian, content, &(fieldData.GetScalars().GetDoubleData().GetData()[i]))
} }
func fillFieldData(vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error { func fillFieldData(ctx context.Context, vcm storage.ChunkManager, dataPath string, fieldData *schemapb.FieldData, i int, offset int64, endian binary.ByteOrder) error {
switch fieldData.Type { switch fieldData.Type {
case schemapb.DataType_BinaryVector: case schemapb.DataType_BinaryVector:
return fillBinVecFieldData(vcm, dataPath, fieldData, i, offset, endian) return fillBinVecFieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_FloatVector: case schemapb.DataType_FloatVector:
return fillFloatVecFieldData(vcm, dataPath, fieldData, i, offset, endian) return fillFloatVecFieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Bool: case schemapb.DataType_Bool:
return fillBoolFieldData(vcm, dataPath, fieldData, i, offset, endian) return fillBoolFieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_String, schemapb.DataType_VarChar: case schemapb.DataType_String, schemapb.DataType_VarChar:
return fillStringFieldData(vcm, dataPath, fieldData, i, offset, endian) return fillStringFieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Int8: case schemapb.DataType_Int8:
return fillInt8FieldData(vcm, dataPath, fieldData, i, offset, endian) return fillInt8FieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Int16: case schemapb.DataType_Int16:
return fillInt16FieldData(vcm, dataPath, fieldData, i, offset, endian) return fillInt16FieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Int32: case schemapb.DataType_Int32:
return fillInt32FieldData(vcm, dataPath, fieldData, i, offset, endian) return fillInt32FieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Int64: case schemapb.DataType_Int64:
return fillInt64FieldData(vcm, dataPath, fieldData, i, offset, endian) return fillInt64FieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Float: case schemapb.DataType_Float:
return fillFloatFieldData(vcm, dataPath, fieldData, i, offset, endian) return fillFloatFieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
case schemapb.DataType_Double: case schemapb.DataType_Double:
return fillDoubleFieldData(vcm, dataPath, fieldData, i, offset, endian) return fillDoubleFieldData(ctx, vcm, dataPath, fieldData, i, offset, endian)
default: default:
return fmt.Errorf("invalid data type: %s", fieldData.Type.String()) return fmt.Errorf("invalid data type: %s", fieldData.Type.String())
} }
} }
func (s *Segment) fillIndexedFieldsData(collectionID UniqueID, func (s *Segment) fillIndexedFieldsData(ctx context.Context, collectionID UniqueID,
vcm storage.ChunkManager, result *segcorepb.RetrieveResults) error { vcm storage.ChunkManager, result *segcorepb.RetrieveResults) error {
for _, fieldData := range result.FieldsData { for _, fieldData := range result.FieldsData {
@ -588,7 +589,7 @@ func (s *Segment) fillIndexedFieldsData(collectionID UniqueID,
endian := common.Endian endian := common.Endian
// fill field data such that fieldData[i] = dataPath[offsetInBinlog*rowBytes, (offsetInBinlog+1)*rowBytes] // fill field data such that fieldData[i] = dataPath[offsetInBinlog*rowBytes, (offsetInBinlog+1)*rowBytes]
if err := fillFieldData(vcm, dataPath, fieldData, i, offsetInBinlog, endian); err != nil { if err := fillFieldData(ctx, vcm, dataPath, fieldData, i, offsetInBinlog, endian); err != nil {
return err return err
} }
} }
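
The addressing comment above is the invariant all the fixed-width fill helpers rely on. A standalone illustration with hypothetical dim and offset values:

package main

import "fmt"

func main() {
	// Hypothetical values: a float-vector field with dim = 128.
	dim := int64(128)
	rowBytes := dim * 4  // float32 components are 4 bytes each
	offset := int64(100) // row index within the binlog
	// Row `offset` occupies [offset*rowBytes, (offset+1)*rowBytes).
	fmt.Printf("ReadAt(ctx, dataPath, %d, %d) returns exactly row %d\n",
		offset*rowBytes, rowBytes, offset)
}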


@ -82,7 +82,7 @@ func (loader *segmentLoader) getFieldType(segment *Segment, fieldID FieldID) (sc
return coll.getFieldType(fieldID) return coll.getFieldType(fieldID)
} }
func (loader *segmentLoader) LoadSegment(req *querypb.LoadSegmentsRequest, segmentType segmentType) error { func (loader *segmentLoader) LoadSegment(ctx context.Context, req *querypb.LoadSegmentsRequest, segmentType segmentType) error {
if req.Base == nil { if req.Base == nil {
return fmt.Errorf("nil base message when load segment, collectionID = %d", req.CollectionID) return fmt.Errorf("nil base message when load segment, collectionID = %d", req.CollectionID)
} }
@ -167,7 +167,7 @@ func (loader *segmentLoader) LoadSegment(req *querypb.LoadSegmentsRequest, segme
segment := newSegments[segmentID] segment := newSegments[segmentID]
tr := timerecord.NewTimeRecorder("loadDurationPerSegment") tr := timerecord.NewTimeRecorder("loadDurationPerSegment")
err := loader.loadFiles(segment, loadInfo) err := loader.loadFiles(ctx, segment, loadInfo)
if err != nil { if err != nil {
log.Error("load segment failed when load data into memory", log.Error("load segment failed when load data into memory",
zap.Int64("partitionID", partitionID), zap.Int64("partitionID", partitionID),
@ -211,7 +211,7 @@ func (loader *segmentLoader) LoadSegment(req *querypb.LoadSegmentsRequest, segme
return nil return nil
} }
func (loader *segmentLoader) loadFiles(segment *Segment, func (loader *segmentLoader) loadFiles(ctx context.Context, segment *Segment,
loadInfo *querypb.SegmentLoadInfo) error { loadInfo *querypb.SegmentLoadInfo) error {
collectionID := loadInfo.CollectionID collectionID := loadInfo.CollectionID
partitionID := loadInfo.PartitionID partitionID := loadInfo.PartitionID
@ -262,14 +262,14 @@ func (loader *segmentLoader) loadFiles(segment *Segment,
} }
} }
if err := loader.loadIndexedFieldData(segment, indexedFieldInfos); err != nil { if err := loader.loadIndexedFieldData(ctx, segment, indexedFieldInfos); err != nil {
return err return err
} }
if err := loader.loadSealedSegmentFields(segment, fieldBinlogs, loadInfo); err != nil { if err := loader.loadSealedSegmentFields(ctx, segment, fieldBinlogs, loadInfo); err != nil {
return err return err
} }
} else { } else {
if err := loader.loadGrowingSegmentFields(segment, loadInfo.BinlogPaths); err != nil { if err := loader.loadGrowingSegmentFields(ctx, segment, loadInfo.BinlogPaths); err != nil {
return err return err
} }
} }
@ -279,14 +279,14 @@ func (loader *segmentLoader) loadFiles(segment *Segment,
} else { } else {
log.Debug("loading bloom filter...", zap.Int64("segmentID", segmentID)) log.Debug("loading bloom filter...", zap.Int64("segmentID", segmentID))
pkStatsBinlogs := loader.filterPKStatsBinlogs(loadInfo.Statslogs, pkFieldID) pkStatsBinlogs := loader.filterPKStatsBinlogs(loadInfo.Statslogs, pkFieldID)
err = loader.loadSegmentBloomFilter(segment, pkStatsBinlogs) err = loader.loadSegmentBloomFilter(ctx, segment, pkStatsBinlogs)
if err != nil { if err != nil {
return err return err
} }
} }
log.Debug("loading delta...", zap.Int64("segmentID", segmentID)) log.Debug("loading delta...", zap.Int64("segmentID", segmentID))
err = loader.loadDeltaLogs(segment, loadInfo.Deltalogs) err = loader.loadDeltaLogs(ctx, segment, loadInfo.Deltalogs)
return err return err
} }
@ -302,7 +302,7 @@ func (loader *segmentLoader) filterPKStatsBinlogs(fieldBinlogs []*datapb.FieldBi
return result return result
} }
func (loader *segmentLoader) loadGrowingSegmentFields(segment *Segment, fieldBinlogs []*datapb.FieldBinlog) error { func (loader *segmentLoader) loadGrowingSegmentFields(ctx context.Context, segment *Segment, fieldBinlogs []*datapb.FieldBinlog) error {
if len(fieldBinlogs) <= 0 { if len(fieldBinlogs) <= 0 {
return nil return nil
} }
@ -313,7 +313,7 @@ func (loader *segmentLoader) loadGrowingSegmentFields(segment *Segment, fieldBin
// load all field binlogs concurrently // load all field binlogs concurrently
loadFutures := make([]*concurrency.Future, 0, len(fieldBinlogs)) loadFutures := make([]*concurrency.Future, 0, len(fieldBinlogs))
for _, fieldBinlog := range fieldBinlogs { for _, fieldBinlog := range fieldBinlogs {
futures := loader.loadFieldBinlogsAsync(fieldBinlog) futures := loader.loadFieldBinlogsAsync(ctx, fieldBinlog)
loadFutures = append(loadFutures, futures...) loadFutures = append(loadFutures, futures...)
} }
@ -363,11 +363,11 @@ func (loader *segmentLoader) loadGrowingSegmentFields(segment *Segment, fieldBin
} }
} }
func (loader *segmentLoader) loadSealedSegmentFields(segment *Segment, fields []*datapb.FieldBinlog, loadInfo *querypb.SegmentLoadInfo) error { func (loader *segmentLoader) loadSealedSegmentFields(ctx context.Context, segment *Segment, fields []*datapb.FieldBinlog, loadInfo *querypb.SegmentLoadInfo) error {
// Load fields concurrently // Load fields concurrently
futures := make([]*concurrency.Future, 0, len(fields)) futures := make([]*concurrency.Future, 0, len(fields))
for _, field := range fields { for _, field := range fields {
future := loader.loadSealedFieldAsync(segment, field, loadInfo) future := loader.loadSealedFieldAsync(ctx, segment, field, loadInfo)
futures = append(futures, future) futures = append(futures, future)
} }
@ -387,13 +387,13 @@ func (loader *segmentLoader) loadSealedSegmentFields(segment *Segment, fields []
} }
// asynchronously load a field of a sealed segment // asynchronously load a field of a sealed segment
func (loader *segmentLoader) loadSealedFieldAsync(segment *Segment, field *datapb.FieldBinlog, loadInfo *querypb.SegmentLoadInfo) *concurrency.Future { func (loader *segmentLoader) loadSealedFieldAsync(ctx context.Context, segment *Segment, field *datapb.FieldBinlog, loadInfo *querypb.SegmentLoadInfo) *concurrency.Future {
iCodec := storage.InsertCodec{} iCodec := storage.InsertCodec{}
// Avoid consuming too much memory if no CPU worker is ready, // Avoid consuming too much memory if no CPU worker is ready,
// acquire a CPU worker before loading field binlogs // acquire a CPU worker before loading field binlogs
return loader.cpuPool.Submit(func() (interface{}, error) { return loader.cpuPool.Submit(func() (interface{}, error) {
futures := loader.loadFieldBinlogsAsync(field) futures := loader.loadFieldBinlogsAsync(ctx, field)
blobs := make([]*storage.Blob, len(futures)) blobs := make([]*storage.Blob, len(futures))
for index, future := range futures { for index, future := range futures {
@ -419,12 +419,12 @@ func (loader *segmentLoader) loadSealedFieldAsync(segment *Segment, field *datap
} }
// Load binlogs asynchronously into memory from KV storage // Load binlogs asynchronously into memory from KV storage
func (loader *segmentLoader) loadFieldBinlogsAsync(field *datapb.FieldBinlog) []*concurrency.Future { func (loader *segmentLoader) loadFieldBinlogsAsync(ctx context.Context, field *datapb.FieldBinlog) []*concurrency.Future {
futures := make([]*concurrency.Future, 0, len(field.Binlogs)) futures := make([]*concurrency.Future, 0, len(field.Binlogs))
for i := range field.Binlogs { for i := range field.Binlogs {
path := field.Binlogs[i].GetLogPath() path := field.Binlogs[i].GetLogPath()
future := loader.ioPool.Submit(func() (interface{}, error) { future := loader.ioPool.Submit(func() (interface{}, error) {
binLog, err := loader.cm.Read(path) binLog, err := loader.cm.Read(ctx, path)
if err != nil { if err != nil {
log.Warn("failed to load binlog", zap.String("filePath", path), zap.Error(err)) log.Warn("failed to load binlog", zap.String("filePath", path), zap.Error(err))
return nil, err return nil, err
@ -442,10 +442,10 @@ func (loader *segmentLoader) loadFieldBinlogsAsync(field *datapb.FieldBinlog) []
return futures return futures
} }
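
loadFieldBinlogsAsync submits one read per binlog path to the io pool, each closure capturing the caller's ctx. A dependency-free sketch of the same fan-out shape (plain goroutines and a hypothetical read callback, not the milvus concurrency package):

package main

import (
	"context"
	"fmt"
	"sync"
)

// readAll fans one read per path out to its own goroutine, each goroutine
// capturing the caller's ctx, then waits for every result.
func readAll(ctx context.Context, read func(context.Context, string) ([]byte, error), paths []string) ([][]byte, error) {
	out := make([][]byte, len(paths))
	errs := make([]error, len(paths))
	var wg sync.WaitGroup
	for i, p := range paths {
		wg.Add(1)
		go func(i int, p string) {
			defer wg.Done()
			out[i], errs[i] = read(ctx, p)
		}(i, p)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	read := func(_ context.Context, p string) ([]byte, error) { return []byte(p), nil }
	data, err := readAll(context.Background(), read, []string{"binlog/1", "binlog/2"})
	fmt.Println(len(data), err) // 2 <nil>
}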
func (loader *segmentLoader) loadIndexedFieldData(segment *Segment, vecFieldInfos map[int64]*IndexedFieldInfo) error { func (loader *segmentLoader) loadIndexedFieldData(ctx context.Context, segment *Segment, vecFieldInfos map[int64]*IndexedFieldInfo) error {
for fieldID, fieldInfo := range vecFieldInfos { for fieldID, fieldInfo := range vecFieldInfos {
indexInfo := fieldInfo.indexInfo indexInfo := fieldInfo.indexInfo
err := loader.loadFieldIndexData(segment, indexInfo) err := loader.loadFieldIndexData(ctx, segment, indexInfo)
if err != nil { if err != nil {
return err return err
} }
@ -461,7 +461,7 @@ func (loader *segmentLoader) loadIndexedFieldData(segment *Segment, vecFieldInfo
return nil return nil
} }
func (loader *segmentLoader) loadFieldIndexData(segment *Segment, indexInfo *querypb.FieldIndexInfo) error { func (loader *segmentLoader) loadFieldIndexData(ctx context.Context, segment *Segment, indexInfo *querypb.FieldIndexInfo) error {
indexBuffer := make([][]byte, 0, len(indexInfo.IndexFilePaths)) indexBuffer := make([][]byte, 0, len(indexInfo.IndexFilePaths))
filteredPaths := make([]string, 0, len(indexInfo.IndexFilePaths)) filteredPaths := make([]string, 0, len(indexInfo.IndexFilePaths))
futures := make([]*concurrency.Future, 0, len(indexInfo.IndexFilePaths)) futures := make([]*concurrency.Future, 0, len(indexInfo.IndexFilePaths))
@ -472,7 +472,7 @@ func (loader *segmentLoader) loadFieldIndexData(segment *Segment, indexInfo *que
if path.Base(indexPath) == storage.IndexParamsKey { if path.Base(indexPath) == storage.IndexParamsKey {
indexParamsFuture := loader.ioPool.Submit(func() (interface{}, error) { indexParamsFuture := loader.ioPool.Submit(func() (interface{}, error) {
log.Debug("load index params file", zap.String("path", indexPath)) log.Debug("load index params file", zap.String("path", indexPath))
return loader.cm.Read(indexPath) return loader.cm.Read(ctx, indexPath)
}) })
indexParamsBlob, err := indexParamsFuture.Await() indexParamsBlob, err := indexParamsFuture.Await()
@ -514,7 +514,7 @@ func (loader *segmentLoader) loadFieldIndexData(segment *Segment, indexInfo *que
indexFuture := loader.cpuPool.Submit(func() (interface{}, error) { indexFuture := loader.cpuPool.Submit(func() (interface{}, error) {
indexBlobFuture := loader.ioPool.Submit(func() (interface{}, error) { indexBlobFuture := loader.ioPool.Submit(func() (interface{}, error) {
log.Debug("load index file", zap.String("path", indexPath)) log.Debug("load index file", zap.String("path", indexPath))
data, err := loader.cm.Read(indexPath) data, err := loader.cm.Read(ctx, indexPath)
if err != nil { if err != nil {
log.Warn("failed to load index file", zap.String("path", indexPath), zap.Error(err)) log.Warn("failed to load index file", zap.String("path", indexPath), zap.Error(err))
return nil, err return nil, err
@ -623,13 +623,13 @@ func (loader *segmentLoader) loadSealedSegments(segment *Segment, insertData *st
return nil return nil
} }
func (loader *segmentLoader) loadSegmentBloomFilter(segment *Segment, binlogPaths []string) error { func (loader *segmentLoader) loadSegmentBloomFilter(ctx context.Context, segment *Segment, binlogPaths []string) error {
if len(binlogPaths) == 0 { if len(binlogPaths) == 0 {
log.Info("there are no stats logs saved with segment", zap.Any("segmentID", segment.segmentID)) log.Info("there are no stats logs saved with segment", zap.Any("segmentID", segment.segmentID))
return nil return nil
} }
values, err := loader.cm.MultiRead(binlogPaths) values, err := loader.cm.MultiRead(ctx, binlogPaths)
if err != nil { if err != nil {
return err return err
} }
@ -662,12 +662,12 @@ func (loader *segmentLoader) loadSegmentBloomFilter(segment *Segment, binlogPath
return nil return nil
} }
func (loader *segmentLoader) loadDeltaLogs(segment *Segment, deltaLogs []*datapb.FieldBinlog) error { func (loader *segmentLoader) loadDeltaLogs(ctx context.Context, segment *Segment, deltaLogs []*datapb.FieldBinlog) error {
dCodec := storage.DeleteCodec{} dCodec := storage.DeleteCodec{}
var blobs []*storage.Blob var blobs []*storage.Blob
for _, deltaLog := range deltaLogs { for _, deltaLog := range deltaLogs {
for _, bLog := range deltaLog.GetBinlogs() { for _, bLog := range deltaLog.GetBinlogs() {
value, err := loader.cm.Read(bLog.GetLogPath()) value, err := loader.cm.Read(ctx, bLog.GetLogPath())
if err != nil { if err != nil {
return err return err
} }


@ -75,7 +75,7 @@ func TestSegmentLoader_loadSegment(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
assert.NoError(t, err) assert.NoError(t, err)
}) })
@ -106,7 +106,7 @@ func TestSegmentLoader_loadSegment(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -119,7 +119,7 @@ func TestSegmentLoader_loadSegment(t *testing.T) {
req := &querypb.LoadSegmentsRequest{} req := &querypb.LoadSegmentsRequest{}
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
assert.Error(t, err) assert.Error(t, err)
}) })
} }
@ -192,7 +192,7 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema) binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema)
assert.NoError(t, err) assert.NoError(t, err)
err = loader.loadSealedSegmentFields(segment, binlog, &querypb.SegmentLoadInfo{}) err = loader.loadSealedSegmentFields(ctx, segment, binlog, &querypb.SegmentLoadInfo{})
assert.NoError(t, err) assert.NoError(t, err)
} }
@ -245,7 +245,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -283,7 +283,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
}, },
}, },
} }
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -308,7 +308,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req, commonpb.SegmentState_Dropped) err = loader.LoadSegment(ctx, req, commonpb.SegmentState_Dropped)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -322,7 +322,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
cm := &mocks.ChunkManager{} cm := &mocks.ChunkManager{}
cm.EXPECT().Read(mock.AnythingOfType("string")).Return(nil, errors.New("mocked")) cm.EXPECT().Read(mock.Anything, mock.AnythingOfType("string")).Return(nil, errors.New("mocked"))
loader.cm = cm loader.cm = cm
fieldPk := genPKFieldSchema(simpleInt64Field) fieldPk := genPKFieldSchema(simpleInt64Field)
@ -350,7 +350,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema) binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema)
assert.NoError(t, err) assert.NoError(t, err)
err = loader.loadSealedSegmentFields(segment, binlog, &querypb.SegmentLoadInfo{}) err = loader.loadSealedSegmentFields(ctx, segment, binlog, &querypb.SegmentLoadInfo{})
assert.Error(t, err) assert.Error(t, err)
}) })
@ -365,7 +365,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
cm := &mocks.ChunkManager{} cm := &mocks.ChunkManager{}
cm.EXPECT().Read(mock.AnythingOfType("string")).Return(nil, errors.New("mocked")) cm.EXPECT().Read(mock.Anything, mock.AnythingOfType("string")).Return(nil, errors.New("mocked"))
loader.cm = cm loader.cm = cm
fieldPk := genPKFieldSchema(simpleInt64Field) fieldPk := genPKFieldSchema(simpleInt64Field)
@ -390,7 +390,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
pool) pool)
assert.Nil(t, err) assert.Nil(t, err)
err = loader.loadFieldIndexData(segment, &querypb.FieldIndexInfo{ err = loader.loadFieldIndexData(ctx, segment, &querypb.FieldIndexInfo{
FieldID: fieldVector.FieldID, FieldID: fieldVector.FieldID,
EnableIndex: true, EnableIndex: true,
@ -514,7 +514,7 @@ func TestSegmentLoader_testLoadGrowingAndSealed(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req1, segmentTypeSealed) err = loader.LoadSegment(ctx, req1, segmentTypeSealed)
assert.NoError(t, err) assert.NoError(t, err)
segment1, err := loader.metaReplica.getSegmentByID(segmentID1, segmentTypeSealed) segment1, err := loader.metaReplica.getSegmentByID(segmentID1, segmentTypeSealed)
@ -540,7 +540,7 @@ func TestSegmentLoader_testLoadGrowingAndSealed(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req2, segmentTypeSealed) err = loader.LoadSegment(ctx, req2, segmentTypeSealed)
assert.NoError(t, err) assert.NoError(t, err)
segment2, err := loader.metaReplica.getSegmentByID(segmentID2, segmentTypeSealed) segment2, err := loader.metaReplica.getSegmentByID(segmentID2, segmentTypeSealed)
@ -574,7 +574,7 @@ func TestSegmentLoader_testLoadGrowingAndSealed(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req1, segmentTypeGrowing) err = loader.LoadSegment(ctx, req1, segmentTypeGrowing)
assert.NoError(t, err) assert.NoError(t, err)
segment1, err := loader.metaReplica.getSegmentByID(segmentID1, segmentTypeGrowing) segment1, err := loader.metaReplica.getSegmentByID(segmentID1, segmentTypeGrowing)
@ -600,7 +600,7 @@ func TestSegmentLoader_testLoadGrowingAndSealed(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req2, segmentTypeGrowing) err = loader.LoadSegment(ctx, req2, segmentTypeGrowing)
assert.NoError(t, err) assert.NoError(t, err)
segment2, err := loader.metaReplica.getSegmentByID(segmentID2, segmentTypeGrowing) segment2, err := loader.metaReplica.getSegmentByID(segmentID2, segmentTypeGrowing)
@ -660,7 +660,7 @@ func TestSegmentLoader_testLoadSealedSegmentWithIndex(t *testing.T) {
}, },
} }
err = loader.LoadSegment(req, segmentTypeSealed) err = loader.LoadSegment(ctx, req, segmentTypeSealed)
assert.NoError(t, err) assert.NoError(t, err)
segment, err := node.metaReplica.getSegmentByID(segmentID, segmentTypeSealed) segment, err := node.metaReplica.getSegmentByID(segmentID, segmentTypeSealed)


@ -714,7 +714,7 @@ func TestSegment_fillIndexedFieldsData(t *testing.T) {
Offset: []int64{0}, Offset: []int64{0},
FieldsData: fieldData, FieldsData: fieldData,
} }
err = segment.fillIndexedFieldsData(defaultCollectionID, vecCM, result) err = segment.fillIndexedFieldsData(ctx, defaultCollectionID, vecCM, result)
assert.Error(t, err) assert.Error(t, err)
}) })
} }
@ -747,6 +747,8 @@ func Test_getFieldDataPath(t *testing.T) {
} }
func Test_fillBinVecFieldData(t *testing.T) { func Test_fillBinVecFieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
m = newMockChunkManager(withDefaultReadAt()) m = newMockChunkManager(withDefaultReadAt())
@ -758,13 +760,15 @@ func Test_fillBinVecFieldData(t *testing.T) {
offset := int64(100) offset := int64(100)
endian := common.Endian endian := common.Endian
assert.NoError(t, fillBinVecFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillBinVecFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillBinVecFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillBinVecFieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillFloatVecFieldData(t *testing.T) { func Test_fillFloatVecFieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
m = newMockChunkManager(withDefaultReadAt()) m = newMockChunkManager(withDefaultReadAt())
@ -776,16 +780,18 @@ func Test_fillFloatVecFieldData(t *testing.T) {
offset := int64(100) offset := int64(100)
endian := common.Endian endian := common.Endian
assert.NoError(t, fillFloatVecFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillFloatVecFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillFloatVecFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillFloatVecFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillFloatVecFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillFloatVecFieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillBoolFieldData(t *testing.T) { func Test_fillBoolFieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -797,16 +803,19 @@ func Test_fillBoolFieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillBoolFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillBoolFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadErr()) m = newMockChunkManager(withReadErr())
assert.Error(t, fillBoolFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillBoolFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadIllegalBool()) m = newMockChunkManager(withReadIllegalBool())
assert.Error(t, fillBoolFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillBoolFieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillStringFieldData(t *testing.T) { func Test_fillStringFieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -818,16 +827,19 @@ func Test_fillStringFieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillStringFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillStringFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadErr()) m = newMockChunkManager(withReadErr())
assert.Error(t, fillStringFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillStringFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadIllegalString()) m = newMockChunkManager(withReadIllegalString())
assert.Error(t, fillStringFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillStringFieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillInt8FieldData(t *testing.T) { func Test_fillInt8FieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -839,16 +851,19 @@ func Test_fillInt8FieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillInt8FieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillInt8FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillInt8FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt8FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillInt8FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt8FieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillInt16FieldData(t *testing.T) { func Test_fillInt16FieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -860,16 +875,18 @@ func Test_fillInt16FieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillInt16FieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillInt16FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillInt16FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt16FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillInt16FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt16FieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillInt32FieldData(t *testing.T) { func Test_fillInt32FieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -881,16 +898,18 @@ func Test_fillInt32FieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillInt32FieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillInt32FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillInt32FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt32FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillInt32FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt32FieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillInt64FieldData(t *testing.T) { func Test_fillInt64FieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -902,16 +921,18 @@ func Test_fillInt64FieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillInt64FieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillInt64FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillInt64FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt64FieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillInt64FieldData(m, path, f, index, offset, endian)) assert.Error(t, fillInt64FieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillFloatFieldData(t *testing.T) { func Test_fillFloatFieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -923,16 +944,18 @@ func Test_fillFloatFieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillFloatFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillFloatFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillFloatFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillFloatFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillFloatFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillFloatFieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillDoubleFieldData(t *testing.T) { func Test_fillDoubleFieldData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var m storage.ChunkManager var m storage.ChunkManager
offset := int64(100) offset := int64(100)
@ -944,13 +967,13 @@ func Test_fillDoubleFieldData(t *testing.T) {
index := 0 index := 0
endian := common.Endian endian := common.Endian
assert.NoError(t, fillDoubleFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillDoubleFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtErr()) m = newMockChunkManager(withReadAtErr())
assert.Error(t, fillDoubleFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillDoubleFieldData(ctx, m, path, f, index, offset, endian))
m = newMockChunkManager(withReadAtEmptyContent()) m = newMockChunkManager(withReadAtEmptyContent())
assert.Error(t, fillDoubleFieldData(m, path, f, index, offset, endian)) assert.Error(t, fillDoubleFieldData(ctx, m, path, f, index, offset, endian))
} }
func Test_fillFieldData(t *testing.T) { func Test_fillFieldData(t *testing.T) {
@ -986,10 +1009,10 @@ func Test_fillFieldData(t *testing.T) {
m = newMockChunkManager(withDefaultReadAt()) m = newMockChunkManager(withDefaultReadAt())
} }
assert.NoError(t, fillFieldData(m, path, f, index, offset, endian)) assert.NoError(t, fillFieldData(context.Background(), m, path, f, index, offset, endian))
} }
assert.Error(t, fillFieldData(m, path, &schemapb.FieldData{Type: schemapb.DataType_None}, index, offset, endian)) assert.Error(t, fillFieldData(context.Background(), m, path, &schemapb.FieldData{Type: schemapb.DataType_None}, index, offset, endian))
} }
func TestUpdateBloomFilter(t *testing.T) { func TestUpdateBloomFilter(t *testing.T) {
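Editor's note: most of the fill*FieldData tests above switch to a cancellable context and defer its cancellation, so anything tied to the context is released when the test returns; Test_fillFieldData has no shared ctx variable and passes context.Background() inline instead. The cancellable pattern, as a sketch using the names from the tests above:

ctx, cancel := context.WithCancel(context.Background())
// cancel releases resources associated with ctx; deferring it keeps
// the context alive exactly as long as the test body.
defer cancel()
assert.NoError(t, fillBoolFieldData(ctx, m, path, f, index, offset, endian))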


@ -241,7 +241,7 @@ func (w *watchDmChannelsTask) Execute(ctx context.Context) (err error) {
zap.Int64("collectionID", collectionID), zap.Int64("collectionID", collectionID),
zap.Int64s("unFlushedSegmentIDs", unFlushedSegmentIDs), zap.Int64s("unFlushedSegmentIDs", unFlushedSegmentIDs),
) )
err = w.node.loader.LoadSegment(req, segmentTypeGrowing) err = w.node.loader.LoadSegment(w.ctx, req, segmentTypeGrowing)
if err != nil { if err != nil {
log.Warn(err.Error()) log.Warn(err.Error())
return err return err
@ -549,7 +549,7 @@ func (l *loadSegmentsTask) Execute(ctx context.Context) error {
segmentIDs := lo.Map(l.req.Infos, func(info *queryPb.SegmentLoadInfo, idx int) UniqueID { return info.SegmentID }) segmentIDs := lo.Map(l.req.Infos, func(info *queryPb.SegmentLoadInfo, idx int) UniqueID { return info.SegmentID })
l.node.metaReplica.addSegmentsLoadingList(segmentIDs) l.node.metaReplica.addSegmentsLoadingList(segmentIDs)
defer l.node.metaReplica.removeSegmentsLoadingList(segmentIDs) defer l.node.metaReplica.removeSegmentsLoadingList(segmentIDs)
err := l.node.loader.LoadSegment(l.req, segmentTypeSealed) err := l.node.loader.LoadSegment(l.ctx, l.req, segmentTypeSealed)
if err != nil { if err != nil {
log.Warn("failed to load segment", zap.Int64("collectionID", l.req.CollectionID), log.Warn("failed to load segment", zap.Int64("collectionID", l.req.CollectionID),
zap.Int64("replicaID", l.req.ReplicaID), zap.Error(err)) zap.Int64("replicaID", l.req.ReplicaID), zap.Error(err))


@ -17,6 +17,7 @@
package storage package storage
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -58,8 +59,8 @@ func (lcm *LocalChunkManager) RootPath() string {
} }
// Path returns the path of local data if exists. // Path returns the path of local data if exists.
func (lcm *LocalChunkManager) Path(filePath string) (string, error) { func (lcm *LocalChunkManager) Path(ctx context.Context, filePath string) (string, error) {
exist, err := lcm.Exist(filePath) exist, err := lcm.Exist(ctx, filePath)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -71,8 +72,8 @@ func (lcm *LocalChunkManager) Path(filePath string) (string, error) {
return absPath, nil return absPath, nil
} }
func (lcm *LocalChunkManager) Reader(filePath string) (FileReader, error) { func (lcm *LocalChunkManager) Reader(ctx context.Context, filePath string) (FileReader, error) {
exist, err := lcm.Exist(filePath) exist, err := lcm.Exist(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -84,10 +85,10 @@ func (lcm *LocalChunkManager) Reader(filePath string) (FileReader, error) {
} }
// Write writes the data to local storage. // Write writes the data to local storage.
func (lcm *LocalChunkManager) Write(filePath string, content []byte) error { func (lcm *LocalChunkManager) Write(ctx context.Context, filePath string, content []byte) error {
absPath := path.Join(lcm.localPath, filePath) absPath := path.Join(lcm.localPath, filePath)
dir := path.Dir(absPath) dir := path.Dir(absPath)
exist, err := lcm.Exist(dir) exist, err := lcm.Exist(ctx, dir)
if err != nil { if err != nil {
return err return err
} }
@ -101,10 +102,10 @@ func (lcm *LocalChunkManager) Write(filePath string, content []byte) error {
} }
// MultiWrite writes the data to local storage. // MultiWrite writes the data to local storage.
func (lcm *LocalChunkManager) MultiWrite(contents map[string][]byte) error { func (lcm *LocalChunkManager) MultiWrite(ctx context.Context, contents map[string][]byte) error {
var el errorutil.ErrorList var el errorutil.ErrorList
for filePath, content := range contents { for filePath, content := range contents {
err := lcm.Write(filePath, content) err := lcm.Write(ctx, filePath, content)
if err != nil { if err != nil {
el = append(el, err) el = append(el, err)
} }
@ -116,7 +117,7 @@ func (lcm *LocalChunkManager) MultiWrite(contents map[string][]byte) error {
} }
// Exist checks whether chunk is saved to local storage. // Exist checks whether chunk is saved to local storage.
func (lcm *LocalChunkManager) Exist(filePath string) (bool, error) { func (lcm *LocalChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {
absPath := path.Join(lcm.localPath, filePath) absPath := path.Join(lcm.localPath, filePath)
_, err := os.Stat(absPath) _, err := os.Stat(absPath)
if err != nil { if err != nil {
@ -129,8 +130,8 @@ func (lcm *LocalChunkManager) Exist(filePath string) (bool, error) {
} }
// Read reads the local storage data if exists. // Read reads the local storage data if exists.
func (lcm *LocalChunkManager) Read(filePath string) ([]byte, error) { func (lcm *LocalChunkManager) Read(ctx context.Context, filePath string) ([]byte, error) {
exist, err := lcm.Exist(filePath) exist, err := lcm.Exist(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -151,11 +152,11 @@ func (lcm *LocalChunkManager) Read(filePath string) ([]byte, error) {
} }
// MultiRead reads the local storage data if exists. // MultiRead reads the local storage data if exists.
func (lcm *LocalChunkManager) MultiRead(filePaths []string) ([][]byte, error) { func (lcm *LocalChunkManager) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error) {
results := make([][]byte, len(filePaths)) results := make([][]byte, len(filePaths))
var el errorutil.ErrorList var el errorutil.ErrorList
for i, filePath := range filePaths { for i, filePath := range filePaths {
content, err := lcm.Read(filePath) content, err := lcm.Read(ctx, filePath)
if err != nil { if err != nil {
el = append(el, err) el = append(el, err)
} }
@ -167,7 +168,7 @@ func (lcm *LocalChunkManager) MultiRead(filePaths []string) ([][]byte, error) {
return results, el return results, el
} }
func (lcm *LocalChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) { func (lcm *LocalChunkManager) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
var filePaths []string var filePaths []string
var modTimes []time.Time var modTimes []time.Time
if recursive { if recursive {
@ -209,17 +210,17 @@ func (lcm *LocalChunkManager) ListWithPrefix(prefix string, recursive bool) ([]s
return filePaths, modTimes, nil return filePaths, modTimes, nil
} }
func (lcm *LocalChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error) { func (lcm *LocalChunkManager) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
filePaths, _, err := lcm.ListWithPrefix(prefix, true) filePaths, _, err := lcm.ListWithPrefix(ctx, prefix, true)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
result, err := lcm.MultiRead(filePaths) result, err := lcm.MultiRead(ctx, filePaths)
return filePaths, result, err return filePaths, result, err
} }
// ReadAt reads specific position data of local storage if exists. // ReadAt reads specific position data of local storage if exists.
func (lcm *LocalChunkManager) ReadAt(filePath string, off int64, length int64) ([]byte, error) { func (lcm *LocalChunkManager) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {
if off < 0 || length < 0 { if off < 0 || length < 0 {
return nil, io.EOF return nil, io.EOF
} }
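Editor's note: ReadAt rejects a negative offset or length up front with io.EOF, mirroring the io.ReaderAt convention for reads outside the available range. Caller-side handling, as a sketch with a hypothetical key:

data, err := lcm.ReadAt(ctx, "demo/key", 4, 8)
if errors.Is(err, io.EOF) {
	// ReadAt signals out-of-range reads (including negative
	// offset/length) with io.EOF rather than a custom error.
}
_ = data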
@ -236,12 +237,12 @@ func (lcm *LocalChunkManager) ReadAt(filePath string, off int64, length int64) (
return res, nil return res, nil
} }
func (lcm *LocalChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) { func (lcm *LocalChunkManager) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error) {
absPath := path.Join(lcm.localPath, filePath) absPath := path.Join(lcm.localPath, filePath)
return mmap.Open(path.Clean(absPath)) return mmap.Open(path.Clean(absPath))
} }
func (lcm *LocalChunkManager) Size(filePath string) (int64, error) { func (lcm *LocalChunkManager) Size(ctx context.Context, filePath string) (int64, error) {
absPath := path.Join(lcm.localPath, filePath) absPath := path.Join(lcm.localPath, filePath)
fi, err := os.Stat(absPath) fi, err := os.Stat(absPath)
if err != nil { if err != nil {
@ -252,8 +253,8 @@ func (lcm *LocalChunkManager) Size(filePath string) (int64, error) {
return size, nil return size, nil
} }
func (lcm *LocalChunkManager) Remove(filePath string) error { func (lcm *LocalChunkManager) Remove(ctx context.Context, filePath string) error {
exist, err := lcm.Exist(filePath) exist, err := lcm.Exist(ctx, filePath)
if err != nil { if err != nil {
return err return err
} }
@ -267,10 +268,10 @@ func (lcm *LocalChunkManager) Remove(filePath string) error {
return nil return nil
} }
func (lcm *LocalChunkManager) MultiRemove(filePaths []string) error { func (lcm *LocalChunkManager) MultiRemove(ctx context.Context, filePaths []string) error {
var el errorutil.ErrorList var el errorutil.ErrorList
for _, filePath := range filePaths { for _, filePath := range filePaths {
err := lcm.Remove(filePath) err := lcm.Remove(ctx, filePath)
if err != nil { if err != nil {
el = append(el, err) el = append(el, err)
} }
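Editor's note: MultiRemove keeps going after a failed Remove and collects every error in an errorutil.ErrorList instead of bailing out on the first one, so a single bad path does not block removal of the rest. A sketch of that aggregation shape, with paths and remove as stand-ins; note the empty-list guard, since returning a typed nil slice through the error interface would read as a non-nil error:

var el errorutil.ErrorList
for _, p := range paths {
	if err := remove(ctx, p); err != nil {
		el = append(el, err) // record the failure, keep deleting
	}
}
if len(el) == 0 {
	return nil // avoid a non-nil error interface holding a nil slice
}
return el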
@ -281,12 +282,12 @@ func (lcm *LocalChunkManager) MultiRemove(filePaths []string) error {
return el return el
} }
func (lcm *LocalChunkManager) RemoveWithPrefix(prefix string) error { func (lcm *LocalChunkManager) RemoveWithPrefix(ctx context.Context, prefix string) error {
filePaths, _, err := lcm.ListWithPrefix(prefix, true) filePaths, _, err := lcm.ListWithPrefix(ctx, prefix, true)
if err != nil { if err != nil {
return err return err
} }
return lcm.MultiRemove(filePaths) return lcm.MultiRemove(ctx, filePaths)
} }
func (lcm *LocalChunkManager) getModTime(filepath string) (time.Time, error) { func (lcm *LocalChunkManager) getModTime(filepath string) (time.Time, error) {
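Editor's note: with the whole LocalChunkManager surface converted, callers thread one context through every operation; the local backend largely ignores it today (plain filesystem calls have no cancellation), but the uniform signature lets code treat the local and minio backends interchangeably. A minimal usage sketch; demo and its key are hypothetical:

// demo shows the per-call ctx plumbing against any ChunkManager.
func demo(cm ChunkManager) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cm.Write(ctx, "demo/key", []byte("value")); err != nil {
		return err
	}
	_, err := cm.Read(ctx, "demo/key") // same ctx reused across calls
	return err
}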


@ -17,6 +17,7 @@
package storage package storage
import ( import (
"context"
"fmt" "fmt"
"path" "path"
"testing" "testing"
@ -26,6 +27,7 @@ import (
) )
func TestLocalCM(t *testing.T) { func TestLocalCM(t *testing.T) {
ctx := context.Background()
t.Run("test RootPath", func(t *testing.T) { t.Run("test RootPath", func(t *testing.T) {
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
assert.Equal(t, localPath, testCM.RootPath()) assert.Equal(t, localPath, testCM.RootPath())
@ -35,7 +37,7 @@ func TestLocalCM(t *testing.T) {
testLoadRoot := "test_load" testLoadRoot := "test_load"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testLoadRoot) defer testCM.RemoveWithPrefix(ctx, testLoadRoot)
prepareTests := []struct { prepareTests := []struct {
key string key string
@ -49,7 +51,7 @@ func TestLocalCM(t *testing.T) {
} }
for _, test := range prepareTests { for _, test := range prepareTests {
err := testCM.Write(path.Join(testLoadRoot, test.key), test.value) err := testCM.Write(ctx, path.Join(testLoadRoot, test.key), test.value)
require.NoError(t, err) require.NoError(t, err)
} }
@ -72,17 +74,17 @@ func TestLocalCM(t *testing.T) {
for _, test := range loadTests { for _, test := range loadTests {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
if test.isvalid { if test.isvalid {
got, err := testCM.Read(path.Join(testLoadRoot, test.loadKey)) got, err := testCM.Read(ctx, path.Join(testLoadRoot, test.loadKey))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expectedValue, got) assert.Equal(t, test.expectedValue, got)
} else { } else {
if test.loadKey == "/" { if test.loadKey == "/" {
got, err := testCM.Read(test.loadKey) got, err := testCM.Read(ctx, test.loadKey)
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, got) assert.Empty(t, got)
return return
} }
got, err := testCM.Read(path.Join(testLoadRoot, test.loadKey)) got, err := testCM.Read(ctx, path.Join(testLoadRoot, test.loadKey))
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, got) assert.Empty(t, got)
} }
@ -103,7 +105,7 @@ func TestLocalCM(t *testing.T) {
for _, test := range loadWithPrefixTests { for _, test := range loadWithPrefixTests {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
gotk, gotv, err := testCM.ReadWithPrefix(path.Join(testLoadRoot, test.prefix)) gotk, gotv, err := testCM.ReadWithPrefix(ctx, path.Join(testLoadRoot, test.prefix))
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, len(test.expectedValue), len(gotk)) assert.Equal(t, len(test.expectedValue), len(gotk))
assert.Equal(t, len(test.expectedValue), len(gotv)) assert.Equal(t, len(test.expectedValue), len(gotv))
@ -128,11 +130,11 @@ func TestLocalCM(t *testing.T) {
test.multiKeys[i] = path.Join(testLoadRoot, test.multiKeys[i]) test.multiKeys[i] = path.Join(testLoadRoot, test.multiKeys[i])
} }
if test.isvalid { if test.isvalid {
got, err := testCM.MultiRead(test.multiKeys) got, err := testCM.MultiRead(ctx, test.multiKeys)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, test.expectedValue, got) assert.Equal(t, test.expectedValue, got)
} else { } else {
got, err := testCM.MultiRead(test.multiKeys) got, err := testCM.MultiRead(ctx, test.multiKeys)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, test.expectedValue, got) assert.Equal(t, test.expectedValue, got)
} }
@ -146,20 +148,20 @@ func TestLocalCM(t *testing.T) {
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
//defer testCM.RemoveWithPrefix(testMultiSaveRoot) //defer testCM.RemoveWithPrefix(testMultiSaveRoot)
err := testCM.Write(path.Join(testMultiSaveRoot, "key_1"), []byte("111")) err := testCM.Write(ctx, path.Join(testMultiSaveRoot, "key_1"), []byte("111"))
assert.Nil(t, err) assert.Nil(t, err)
err = testCM.Write(path.Join(testMultiSaveRoot, "key_2"), []byte("222")) err = testCM.Write(ctx, path.Join(testMultiSaveRoot, "key_2"), []byte("222"))
assert.Nil(t, err) assert.Nil(t, err)
val, err := testCM.Read(path.Join(testMultiSaveRoot, "key_1")) val, err := testCM.Read(ctx, path.Join(testMultiSaveRoot, "key_1"))
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, []byte("111"), val) assert.Equal(t, []byte("111"), val)
val, err = testCM.Read(path.Join(testMultiSaveRoot, "key_2")) val, err = testCM.Read(ctx, path.Join(testMultiSaveRoot, "key_2"))
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, []byte("222"), val) assert.Equal(t, []byte("222"), val)
err = testCM.Write(path.Join(testMultiSaveRoot, "key_1/key_1"), []byte("111")) err = testCM.Write(ctx, path.Join(testMultiSaveRoot, "key_1/key_1"), []byte("111"))
assert.Error(t, err) assert.Error(t, err)
}) })
@ -168,9 +170,9 @@ func TestLocalCM(t *testing.T) {
testMultiSaveRoot := "test_multisave" testMultiSaveRoot := "test_multisave"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testMultiSaveRoot) defer testCM.RemoveWithPrefix(ctx, testMultiSaveRoot)
err := testCM.Write(path.Join(testMultiSaveRoot, "key_1"), []byte("111")) err := testCM.Write(ctx, path.Join(testMultiSaveRoot, "key_1"), []byte("111"))
assert.Nil(t, err) assert.Nil(t, err)
kvs := map[string][]byte{ kvs := map[string][]byte{
@ -178,10 +180,10 @@ func TestLocalCM(t *testing.T) {
path.Join(testMultiSaveRoot, "key_2"): []byte("456"), path.Join(testMultiSaveRoot, "key_2"): []byte("456"),
} }
err = testCM.MultiWrite(kvs) err = testCM.MultiWrite(ctx, kvs)
assert.Nil(t, err) assert.Nil(t, err)
val, err := testCM.Read(path.Join(testMultiSaveRoot, "key_1")) val, err := testCM.Read(ctx, path.Join(testMultiSaveRoot, "key_1"))
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, []byte("123"), val) assert.Equal(t, []byte("123"), val)
@ -190,7 +192,7 @@ func TestLocalCM(t *testing.T) {
path.Join(testMultiSaveRoot, "key_2/key_2"): []byte("456"), path.Join(testMultiSaveRoot, "key_2/key_2"): []byte("456"),
} }
err = testCM.MultiWrite(kvs) err = testCM.MultiWrite(ctx, kvs)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -198,7 +200,7 @@ func TestLocalCM(t *testing.T) {
testRemoveRoot := "test_remove" testRemoveRoot := "test_remove"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testRemoveRoot) defer testCM.RemoveWithPrefix(ctx, testRemoveRoot)
prepareTests := []struct { prepareTests := []struct {
k string k string
@ -216,7 +218,7 @@ func TestLocalCM(t *testing.T) {
for _, test := range prepareTests { for _, test := range prepareTests {
k := path.Join(testRemoveRoot, test.k) k := path.Join(testRemoveRoot, test.k)
err := testCM.Write(k, test.v) err := testCM.Write(ctx, k, test.v)
require.NoError(t, err) require.NoError(t, err)
} }
@ -233,14 +235,14 @@ func TestLocalCM(t *testing.T) {
for _, test := range removeTests { for _, test := range removeTests {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
k := path.Join(testRemoveRoot, test.removeKey) k := path.Join(testRemoveRoot, test.removeKey)
v, err := testCM.Read(k) v, err := testCM.Read(ctx, k)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, test.valueBeforeRemove, v) require.Equal(t, test.valueBeforeRemove, v)
err = testCM.Remove(k) err = testCM.Remove(ctx, k)
assert.NoError(t, err) assert.NoError(t, err)
v, err = testCM.Read(k) v, err = testCM.Read(ctx, k)
require.Error(t, err) require.Error(t, err)
require.Empty(t, v) require.Empty(t, v)
}) })
@ -252,15 +254,15 @@ func TestLocalCM(t *testing.T) {
path.Join(testRemoveRoot, "mkey_3"), path.Join(testRemoveRoot, "mkey_3"),
} }
lv, err := testCM.MultiRead(multiRemoveTest) lv, err := testCM.MultiRead(ctx, multiRemoveTest)
require.Nil(t, err) require.Nil(t, err)
require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv) require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv)
err = testCM.MultiRemove(multiRemoveTest) err = testCM.MultiRemove(ctx, multiRemoveTest)
assert.Nil(t, err) assert.Nil(t, err)
for _, k := range multiRemoveTest { for _, k := range multiRemoveTest {
v, err := testCM.Read(k) v, err := testCM.Read(ctx, k)
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, v) assert.Empty(t, v)
} }
@ -272,15 +274,15 @@ func TestLocalCM(t *testing.T) {
} }
removePrefix := path.Join(testRemoveRoot, "key_prefix") removePrefix := path.Join(testRemoveRoot, "key_prefix")
lv, err = testCM.MultiRead(removeWithPrefixTest) lv, err = testCM.MultiRead(ctx, removeWithPrefixTest)
require.NoError(t, err) require.NoError(t, err)
require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv) require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv)
err = testCM.RemoveWithPrefix(removePrefix) err = testCM.RemoveWithPrefix(ctx, removePrefix)
assert.NoError(t, err) assert.NoError(t, err)
for _, k := range removeWithPrefixTest { for _, k := range removeWithPrefixTest {
v, err := testCM.Read(k) v, err := testCM.Read(ctx, k)
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, v) assert.Empty(t, v)
} }
@ -290,44 +292,44 @@ func TestLocalCM(t *testing.T) {
testLoadPartialRoot := "read_at" testLoadPartialRoot := "read_at"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testLoadPartialRoot) defer testCM.RemoveWithPrefix(ctx, testLoadPartialRoot)
key := path.Join(testLoadPartialRoot, "TestMinIOKV_LoadPartial_key") key := path.Join(testLoadPartialRoot, "TestMinIOKV_LoadPartial_key")
value := []byte("TestMinIOKV_LoadPartial_value") value := []byte("TestMinIOKV_LoadPartial_value")
err := testCM.Write(key, value) err := testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
var off, length int64 var off, length int64
var partial []byte var partial []byte
off, length = 1, 1 off, length = 1, 1
partial, err = testCM.ReadAt(key, off, length) partial, err = testCM.ReadAt(ctx, key, off, length)
assert.NoError(t, err) assert.NoError(t, err)
assert.ElementsMatch(t, partial, value[off:off+length]) assert.ElementsMatch(t, partial, value[off:off+length])
off, length = 0, int64(len(value)) off, length = 0, int64(len(value))
partial, err = testCM.ReadAt(key, off, length) partial, err = testCM.ReadAt(ctx, key, off, length)
assert.NoError(t, err) assert.NoError(t, err)
assert.ElementsMatch(t, partial, value[off:off+length]) assert.ElementsMatch(t, partial, value[off:off+length])
// error case // error case
off, length = 5, -2 off, length = 5, -2
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
off, length = -1, 2 off, length = -1, 2
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
off, length = 1, -2 off, length = 1, -2
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
err = testCM.Remove(key) err = testCM.Remove(ctx, key)
assert.NoError(t, err) assert.NoError(t, err)
off, length = 1, 1 off, length = 1, 1
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -335,21 +337,21 @@ func TestLocalCM(t *testing.T) {
testGetSizeRoot := "get_size" testGetSizeRoot := "get_size"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testGetSizeRoot) defer testCM.RemoveWithPrefix(ctx, testGetSizeRoot)
key := path.Join(testGetSizeRoot, "TestMinIOKV_GetSize_key") key := path.Join(testGetSizeRoot, "TestMinIOKV_GetSize_key")
value := []byte("TestMinIOKV_GetSize_value") value := []byte("TestMinIOKV_GetSize_value")
err := testCM.Write(key, value) err := testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
size, err := testCM.Size(key) size, err := testCM.Size(ctx, key)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, size, int64(len(value))) assert.Equal(t, size, int64(len(value)))
key2 := path.Join(testGetSizeRoot, "TestMemoryKV_GetSize_key2") key2 := path.Join(testGetSizeRoot, "TestMemoryKV_GetSize_key2")
size, err = testCM.Size(key2) size, err = testCM.Size(ctx, key2)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, int64(0), size) assert.Equal(t, int64(0), size)
}) })
@ -358,21 +360,21 @@ func TestLocalCM(t *testing.T) {
testGetSizeRoot := "get_path" testGetSizeRoot := "get_path"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testGetSizeRoot) defer testCM.RemoveWithPrefix(ctx, testGetSizeRoot)
key := path.Join(testGetSizeRoot, "TestMinIOKV_GetPath_key") key := path.Join(testGetSizeRoot, "TestMinIOKV_GetPath_key")
value := []byte("TestMinIOKV_GetPath_value") value := []byte("TestMinIOKV_GetPath_value")
err := testCM.Write(key, value) err := testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
p, err := testCM.Path(key) p, err := testCM.Path(ctx, key)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, p, path.Join(localPath, key)) assert.Equal(t, p, path.Join(localPath, key))
key2 := path.Join(testGetSizeRoot, "TestMemoryKV_GetSize_key2") key2 := path.Join(testGetSizeRoot, "TestMemoryKV_GetSize_key2")
p, err = testCM.Path(key2) p, err = testCM.Path(ctx, key2)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, p, "") assert.Equal(t, p, "")
}) })
@ -381,29 +383,29 @@ func TestLocalCM(t *testing.T) {
testPrefix := "prefix" testPrefix := "prefix"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testPrefix) defer testCM.RemoveWithPrefix(ctx, testPrefix)
pathB := path.Join("a", "b") pathB := path.Join("a", "b")
key := path.Join(testPrefix, pathB) key := path.Join(testPrefix, pathB)
value := []byte("a") value := []byte("a")
err := testCM.Write(key, value) err := testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
pathC := path.Join("a", "c") pathC := path.Join("a", "c")
key = path.Join(testPrefix, pathC) key = path.Join(testPrefix, pathC)
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
pathPrefix := path.Join(testPrefix, "a") pathPrefix := path.Join(testPrefix, "a")
r, m, err := testCM.ListWithPrefix(pathPrefix, true) r, m, err := testCM.ListWithPrefix(ctx, pathPrefix, true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(r), 2) assert.Equal(t, len(r), 2)
assert.Equal(t, len(m), 2) assert.Equal(t, len(m), 2)
testCM.RemoveWithPrefix(testPrefix) testCM.RemoveWithPrefix(ctx, testPrefix)
r, m, err = testCM.ListWithPrefix(pathPrefix, true) r, m, err = testCM.ListWithPrefix(ctx, pathPrefix, true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(r), 0) assert.Equal(t, len(r), 0)
assert.Equal(t, len(m), 0) assert.Equal(t, len(m), 0)
@ -413,46 +415,46 @@ func TestLocalCM(t *testing.T) {
testPrefix := "prefix-ListWithPrefix" testPrefix := "prefix-ListWithPrefix"
testCM := NewLocalChunkManager(RootPath(localPath)) testCM := NewLocalChunkManager(RootPath(localPath))
defer testCM.RemoveWithPrefix(testPrefix) defer testCM.RemoveWithPrefix(ctx, testPrefix)
key := path.Join(testPrefix, "abc", "def") key := path.Join(testPrefix, "abc", "def")
value := []byte("a") value := []byte("a")
err := testCM.Write(key, value) err := testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
key = path.Join(testPrefix, "abc", "deg") key = path.Join(testPrefix, "abc", "deg")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
key = path.Join(testPrefix, "abd") key = path.Join(testPrefix, "abd")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
key = path.Join(testPrefix, "bcd") key = path.Join(testPrefix, "bcd")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
dirs, mods, err := testCM.ListWithPrefix(testPrefix+"/", false) dirs, mods, err := testCM.ListWithPrefix(ctx, testPrefix+"/", false)
assert.Nil(t, err) assert.Nil(t, err)
fmt.Println(dirs) fmt.Println(dirs)
assert.Equal(t, 3, len(dirs)) assert.Equal(t, 3, len(dirs))
assert.Equal(t, 3, len(mods)) assert.Equal(t, 3, len(mods))
testPrefix2 := path.Join(testPrefix, "a") testPrefix2 := path.Join(testPrefix, "a")
dirs, mods, err = testCM.ListWithPrefix(testPrefix2, false) dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix2, false)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 2, len(dirs)) assert.Equal(t, 2, len(dirs))
assert.Equal(t, 2, len(mods)) assert.Equal(t, 2, len(mods))
dirs, mods, err = testCM.ListWithPrefix(testPrefix2, false) dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix2, false)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 2, len(dirs)) assert.Equal(t, 2, len(dirs))
assert.Equal(t, 2, len(mods)) assert.Equal(t, 2, len(mods))
err = testCM.RemoveWithPrefix(testPrefix) err = testCM.RemoveWithPrefix(ctx, testPrefix)
assert.NoError(t, err) assert.NoError(t, err)
dirs, mods, err = testCM.ListWithPrefix(testPrefix, false) dirs, mods, err = testCM.ListWithPrefix(ctx, testPrefix, false)
assert.NoError(t, err) assert.NoError(t, err)
fmt.Println(dirs) fmt.Println(dirs)
// dir still exists // dir still exists


@ -42,7 +42,7 @@ var CheckBucketRetryAttempts uint = 20
type MinioChunkManager struct { type MinioChunkManager struct {
*minio.Client *minio.Client
ctx context.Context // ctx context.Context
bucketName string bucketName string
rootPath string rootPath string
} }
@ -103,7 +103,6 @@ func newMinioChunkManagerWithConfig(ctx context.Context, c *config) (*MinioChunk
} }
mcm := &MinioChunkManager{ mcm := &MinioChunkManager{
ctx: ctx,
Client: minIOClient, Client: minIOClient,
bucketName: c.bucketName, bucketName: c.bucketName,
} }
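Editor's note: dropping the ctx field follows the context package's own guidance ("Do not store Contexts inside a struct type; instead, pass a Context explicitly to each function that needs it"). Per-call contexts let every request carry its own deadline, as in this sketch (the key and timeout are made up):

// Each call now decides its own lifetime instead of inheriting the
// long-lived context the manager was constructed with.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ok, err := mcm.Exist(ctx, "index_files/1/segment.idx")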
@ -119,8 +118,7 @@ func (mcm *MinioChunkManager) normalizeRootPath(rootPath string) string {
} }
// SetVar sets the variable value of mcm // SetVar sets the variable value of mcm
func (mcm *MinioChunkManager) SetVar(ctx context.Context, bucketName string, rootPath string) { func (mcm *MinioChunkManager) SetVar(bucketName string, rootPath string) {
mcm.ctx = ctx
mcm.bucketName = bucketName mcm.bucketName = bucketName
mcm.rootPath = rootPath mcm.rootPath = rootPath
} }
@ -131,8 +129,8 @@ func (mcm *MinioChunkManager) RootPath() string {
} }
// Path returns the path of minio data if exists. // Path returns the path of minio data if exists.
func (mcm *MinioChunkManager) Path(filePath string) (string, error) { func (mcm *MinioChunkManager) Path(ctx context.Context, filePath string) (string, error) {
exist, err := mcm.Exist(filePath) exist, err := mcm.Exist(ctx, filePath)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -143,8 +141,8 @@ func (mcm *MinioChunkManager) Path(filePath string) (string, error) {
} }
// Reader returns a reader for the minio data if it exists. // Reader returns a reader for the minio data if it exists.
func (mcm *MinioChunkManager) Reader(filePath string) (FileReader, error) { func (mcm *MinioChunkManager) Reader(ctx context.Context, filePath string) (FileReader, error) {
reader, err := mcm.Client.GetObject(mcm.ctx, mcm.bucketName, filePath, minio.GetObjectOptions{}) reader, err := mcm.Client.GetObject(ctx, mcm.bucketName, filePath, minio.GetObjectOptions{})
if err != nil { if err != nil {
log.Warn("failed to get object", zap.String("path", filePath), zap.Error(err)) log.Warn("failed to get object", zap.String("path", filePath), zap.Error(err))
return nil, err return nil, err
@ -152,8 +150,8 @@ func (mcm *MinioChunkManager) Reader(filePath string) (FileReader, error) {
return reader, nil return reader, nil
} }
func (mcm *MinioChunkManager) Size(filePath string) (int64, error) { func (mcm *MinioChunkManager) Size(ctx context.Context, filePath string) (int64, error) {
objectInfo, err := mcm.Client.StatObject(mcm.ctx, mcm.bucketName, filePath, minio.StatObjectOptions{}) objectInfo, err := mcm.Client.StatObject(ctx, mcm.bucketName, filePath, minio.StatObjectOptions{})
if err != nil { if err != nil {
log.Warn("failed to stat object", zap.String("path", filePath), zap.Error(err)) log.Warn("failed to stat object", zap.String("path", filePath), zap.Error(err))
return 0, err return 0, err
@ -163,8 +161,8 @@ func (mcm *MinioChunkManager) Size(filePath string) (int64, error) {
} }
// Write writes the data to minio storage. // Write writes the data to minio storage.
func (mcm *MinioChunkManager) Write(filePath string, content []byte) error { func (mcm *MinioChunkManager) Write(ctx context.Context, filePath string, content []byte) error {
_, err := mcm.Client.PutObject(mcm.ctx, mcm.bucketName, filePath, bytes.NewReader(content), int64(len(content)), minio.PutObjectOptions{}) _, err := mcm.Client.PutObject(ctx, mcm.bucketName, filePath, bytes.NewReader(content), int64(len(content)), minio.PutObjectOptions{})
if err != nil { if err != nil {
log.Warn("failed to put object", zap.String("path", filePath), zap.Error(err)) log.Warn("failed to put object", zap.String("path", filePath), zap.Error(err))
@ -176,10 +174,10 @@ func (mcm *MinioChunkManager) Write(filePath string, content []byte) error {
// MultiWrite saves multiple objects; the path is the key of @kvs. // MultiWrite saves multiple objects; the path is the key of @kvs.
// The object value is the value of @kvs. // The object value is the value of @kvs.
func (mcm *MinioChunkManager) MultiWrite(kvs map[string][]byte) error { func (mcm *MinioChunkManager) MultiWrite(ctx context.Context, kvs map[string][]byte) error {
var el errorutil.ErrorList var el errorutil.ErrorList
for key, value := range kvs { for key, value := range kvs {
err := mcm.Write(key, value) err := mcm.Write(ctx, key, value)
if err != nil { if err != nil {
el = append(el, err) el = append(el, err)
} }
@ -191,8 +189,8 @@ func (mcm *MinioChunkManager) MultiWrite(kvs map[string][]byte) error {
} }
// Exist checks whether chunk is saved to minio storage. // Exist checks whether chunk is saved to minio storage.
func (mcm *MinioChunkManager) Exist(filePath string) (bool, error) { func (mcm *MinioChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {
_, err := mcm.Client.StatObject(mcm.ctx, mcm.bucketName, filePath, minio.StatObjectOptions{}) _, err := mcm.Client.StatObject(ctx, mcm.bucketName, filePath, minio.StatObjectOptions{})
if err != nil { if err != nil {
errResponse := minio.ToErrorResponse(err) errResponse := minio.ToErrorResponse(err)
if errResponse.Code == "NoSuchKey" { if errResponse.Code == "NoSuchKey" {
@ -205,8 +203,8 @@ func (mcm *MinioChunkManager) Exist(filePath string) (bool, error) {
} }
// Read reads the minio storage data if exists. // Read reads the minio storage data if exists.
func (mcm *MinioChunkManager) Read(filePath string) ([]byte, error) { func (mcm *MinioChunkManager) Read(ctx context.Context, filePath string) ([]byte, error) {
object, err := mcm.Client.GetObject(mcm.ctx, mcm.bucketName, filePath, minio.GetObjectOptions{}) object, err := mcm.Client.GetObject(ctx, mcm.bucketName, filePath, minio.GetObjectOptions{})
if err != nil { if err != nil {
log.Warn("failed to get object", zap.String("path", filePath), zap.Error(err)) log.Warn("failed to get object", zap.String("path", filePath), zap.Error(err))
return nil, err return nil, err
@ -225,11 +223,11 @@ func (mcm *MinioChunkManager) Read(filePath string) ([]byte, error) {
return data, nil return data, nil
} }
func (mcm *MinioChunkManager) MultiRead(keys []string) ([][]byte, error) { func (mcm *MinioChunkManager) MultiRead(ctx context.Context, keys []string) ([][]byte, error) {
var el errorutil.ErrorList var el errorutil.ErrorList
var objectsValues [][]byte var objectsValues [][]byte
for _, key := range keys { for _, key := range keys {
objectValue, err := mcm.Read(key) objectValue, err := mcm.Read(ctx, key)
if err != nil { if err != nil {
el = append(el, err) el = append(el, err)
} }
@ -242,12 +240,12 @@ func (mcm *MinioChunkManager) MultiRead(keys []string) ([][]byte, error) {
return objectsValues, el return objectsValues, el
} }
func (mcm *MinioChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error) { func (mcm *MinioChunkManager) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
objectsKeys, _, err := mcm.ListWithPrefix(prefix, true) objectsKeys, _, err := mcm.ListWithPrefix(ctx, prefix, true)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
objectsValues, err := mcm.MultiRead(objectsKeys) objectsValues, err := mcm.MultiRead(ctx, objectsKeys)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -255,12 +253,12 @@ func (mcm *MinioChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte,
return objectsKeys, objectsValues, nil return objectsKeys, objectsValues, nil
} }
func (mcm *MinioChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) { func (mcm *MinioChunkManager) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error) {
return nil, errors.New("this method has not been implemented") return nil, errors.New("this method has not been implemented")
} }
// ReadAt reads specific position data of minio storage if exists. // ReadAt reads specific position data of minio storage if exists.
func (mcm *MinioChunkManager) ReadAt(filePath string, off int64, length int64) ([]byte, error) { func (mcm *MinioChunkManager) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {
if off < 0 || length < 0 { if off < 0 || length < 0 {
return nil, io.EOF return nil, io.EOF
} }
@ -272,7 +270,7 @@ func (mcm *MinioChunkManager) ReadAt(filePath string, off int64, length int64) (
return nil, err return nil, err
} }
object, err := mcm.Client.GetObject(mcm.ctx, mcm.bucketName, filePath, opts) object, err := mcm.Client.GetObject(ctx, mcm.bucketName, filePath, opts)
if err != nil { if err != nil {
log.Warn("failed to get object", zap.String("path", filePath), zap.Error(err)) log.Warn("failed to get object", zap.String("path", filePath), zap.Error(err))
return nil, err return nil, err
@ -287,8 +285,8 @@ func (mcm *MinioChunkManager) ReadAt(filePath string, off int64, length int64) (
} }
// Remove deletes an object with @key. // Remove deletes an object with @key.
func (mcm *MinioChunkManager) Remove(filePath string) error { func (mcm *MinioChunkManager) Remove(ctx context.Context, filePath string) error {
err := mcm.Client.RemoveObject(mcm.ctx, mcm.bucketName, filePath, minio.RemoveObjectOptions{}) err := mcm.Client.RemoveObject(ctx, mcm.bucketName, filePath, minio.RemoveObjectOptions{})
if err != nil { if err != nil {
log.Warn("failed to remove object", zap.String("path", filePath), zap.Error(err)) log.Warn("failed to remove object", zap.String("path", filePath), zap.Error(err))
return err return err
@ -297,10 +295,10 @@ func (mcm *MinioChunkManager) Remove(filePath string) error {
} }
// MultiRemove deletes the objects with @keys. // MultiRemove deletes the objects with @keys.
func (mcm *MinioChunkManager) MultiRemove(keys []string) error { func (mcm *MinioChunkManager) MultiRemove(ctx context.Context, keys []string) error {
var el errorutil.ErrorList var el errorutil.ErrorList
for _, key := range keys { for _, key := range keys {
err := mcm.Remove(key) err := mcm.Remove(ctx, key)
if err != nil { if err != nil {
el = append(el, err) el = append(el, err)
} }
@ -312,9 +310,9 @@ func (mcm *MinioChunkManager) MultiRemove(keys []string) error {
} }
// RemoveWithPrefix removes all objects with the same prefix @prefix from minio. // RemoveWithPrefix removes all objects with the same prefix @prefix from minio.
func (mcm *MinioChunkManager) RemoveWithPrefix(prefix string) error { func (mcm *MinioChunkManager) RemoveWithPrefix(ctx context.Context, prefix string) error {
objects := mcm.Client.ListObjects(mcm.ctx, mcm.bucketName, minio.ListObjectsOptions{Prefix: prefix, Recursive: true}) objects := mcm.Client.ListObjects(ctx, mcm.bucketName, minio.ListObjectsOptions{Prefix: prefix, Recursive: true})
for rErr := range mcm.Client.RemoveObjects(mcm.ctx, mcm.bucketName, objects, minio.RemoveObjectsOptions{GovernanceBypass: false}) { for rErr := range mcm.Client.RemoveObjects(ctx, mcm.bucketName, objects, minio.RemoveObjectsOptions{GovernanceBypass: false}) {
if rErr.Err != nil { if rErr.Err != nil {
log.Warn("failed to remove objects", zap.String("prefix", prefix), zap.Error(rErr.Err)) log.Warn("failed to remove objects", zap.String("prefix", prefix), zap.Error(rErr.Err))
return rErr.Err return rErr.Err
@ -328,7 +326,7 @@ func (mcm *MinioChunkManager) RemoveWithPrefix(prefix string) error {
// say minio has following objects: [a, ab, a/b, ab/c] // say minio has following objects: [a, ab, a/b, ab/c]
// calling `ListWithPrefix` with `prefix` = a && `recursive` = false will only return [a, ab] // calling `ListWithPrefix` with `prefix` = a && `recursive` = false will only return [a, ab]
// If the caller needs all objects without level limitation, `recursive` shall be true. // If the caller needs all objects without level limitation, `recursive` shall be true.
func (mcm *MinioChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) { func (mcm *MinioChunkManager) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
// cannot use ListObjects(ctx, bucketName, Opt{Prefix:prefix, Recursive:true}) // cannot use ListObjects(ctx, bucketName, Opt{Prefix:prefix, Recursive:true})
// if minio has lots of objects under the provided path // if minio has lots of objects under the provided path
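Editor's note: the recursion semantics spelled out in the comment above can be made concrete; with the hypothetical objects [a, ab, a/b, ab/c], a sketch of both modes:

// non-recursive: only the current level under the prefix
keys, _, _ := mcm.ListWithPrefix(ctx, "a", false) // -> ["a", "ab"]

// recursive: every object whose key starts with the prefix
keys, _, _ = mcm.ListWithPrefix(ctx, "a", true) // -> ["a", "ab", "a/b", "ab/c"]
_ = keys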
@ -347,7 +345,7 @@ func (mcm *MinioChunkManager) ListWithPrefix(prefix string, recursive bool) ([]s
// TODO add concurrent call if performance matters // TODO add concurrent call if performance matters
// only return current level per call // only return current level per call
objects := mcm.Client.ListObjects(mcm.ctx, mcm.bucketName, minio.ListObjectsOptions{Prefix: pre, Recursive: false}) objects := mcm.Client.ListObjects(ctx, mcm.bucketName, minio.ListObjectsOptions{Prefix: pre, Recursive: false})
for object := range objects { for object := range objects {
if object.Err != nil { if object.Err != nil {


@ -96,7 +96,7 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testLoadRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testLoadRoot)
require.NoError(t, err) require.NoError(t, err)
defer testCM.RemoveWithPrefix(testLoadRoot) defer testCM.RemoveWithPrefix(ctx, testLoadRoot)
assert.Equal(t, testLoadRoot, testCM.RootPath()) assert.Equal(t, testLoadRoot, testCM.RootPath())
@ -112,7 +112,7 @@ func TestMinIOCM(t *testing.T) {
} }
for _, test := range prepareTests { for _, test := range prepareTests {
err = testCM.Write(path.Join(testLoadRoot, test.key), test.value) err = testCM.Write(ctx, path.Join(testLoadRoot, test.key), test.value)
require.NoError(t, err) require.NoError(t, err)
} }
@ -135,17 +135,17 @@ func TestMinIOCM(t *testing.T) {
for _, test := range loadTests { for _, test := range loadTests {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
if test.isvalid { if test.isvalid {
got, err := testCM.Read(path.Join(testLoadRoot, test.loadKey)) got, err := testCM.Read(ctx, path.Join(testLoadRoot, test.loadKey))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expectedValue, got) assert.Equal(t, test.expectedValue, got)
} else { } else {
if test.loadKey == "/" { if test.loadKey == "/" {
got, err := testCM.Read(test.loadKey) got, err := testCM.Read(ctx, test.loadKey)
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, got) assert.Empty(t, got)
return return
} }
got, err := testCM.Read(path.Join(testLoadRoot, test.loadKey)) got, err := testCM.Read(ctx, path.Join(testLoadRoot, test.loadKey))
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, got) assert.Empty(t, got)
} }
@ -166,7 +166,7 @@ func TestMinIOCM(t *testing.T) {
for _, test := range loadWithPrefixTests { for _, test := range loadWithPrefixTests {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
gotk, gotv, err := testCM.ReadWithPrefix(path.Join(testLoadRoot, test.prefix)) gotk, gotv, err := testCM.ReadWithPrefix(ctx, path.Join(testLoadRoot, test.prefix))
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(test.expectedValue), len(gotk)) assert.Equal(t, len(test.expectedValue), len(gotk))
assert.Equal(t, len(test.expectedValue), len(gotv)) assert.Equal(t, len(test.expectedValue), len(gotv))
@ -191,11 +191,11 @@ func TestMinIOCM(t *testing.T) {
test.multiKeys[i] = path.Join(testLoadRoot, test.multiKeys[i]) test.multiKeys[i] = path.Join(testLoadRoot, test.multiKeys[i])
} }
if test.isvalid { if test.isvalid {
got, err := testCM.MultiRead(test.multiKeys) got, err := testCM.MultiRead(ctx, test.multiKeys)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expectedValue, got) assert.Equal(t, test.expectedValue, got)
} else { } else {
got, err := testCM.MultiRead(test.multiKeys) got, err := testCM.MultiRead(ctx, test.multiKeys)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, test.expectedValue, got) assert.Equal(t, test.expectedValue, got)
} }
@ -211,9 +211,9 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testMultiSaveRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testMultiSaveRoot)
assert.Nil(t, err) assert.Nil(t, err)
defer testCM.RemoveWithPrefix(testMultiSaveRoot) defer testCM.RemoveWithPrefix(ctx, testMultiSaveRoot)
err = testCM.Write(path.Join(testMultiSaveRoot, "key_1"), []byte("111")) err = testCM.Write(ctx, path.Join(testMultiSaveRoot, "key_1"), []byte("111"))
assert.Nil(t, err) assert.Nil(t, err)
kvs := map[string][]byte{ kvs := map[string][]byte{
@ -221,10 +221,10 @@ func TestMinIOCM(t *testing.T) {
path.Join(testMultiSaveRoot, "key_2"): []byte("456"), path.Join(testMultiSaveRoot, "key_2"): []byte("456"),
} }
err = testCM.MultiWrite(kvs) err = testCM.MultiWrite(ctx, kvs)
assert.Nil(t, err) assert.Nil(t, err)
val, err := testCM.Read(path.Join(testMultiSaveRoot, "key_1")) val, err := testCM.Read(ctx, path.Join(testMultiSaveRoot, "key_1"))
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, []byte("123"), val) assert.Equal(t, []byte("123"), val)
}) })
@ -236,7 +236,7 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testRemoveRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testRemoveRoot)
assert.Nil(t, err) assert.Nil(t, err)
defer testCM.RemoveWithPrefix(testRemoveRoot) defer testCM.RemoveWithPrefix(ctx, testRemoveRoot)
prepareTests := []struct { prepareTests := []struct {
k string k string
@ -254,7 +254,7 @@ func TestMinIOCM(t *testing.T) {
for _, test := range prepareTests { for _, test := range prepareTests {
k := path.Join(testRemoveRoot, test.k) k := path.Join(testRemoveRoot, test.k)
err = testCM.Write(k, test.v) err = testCM.Write(ctx, k, test.v)
require.NoError(t, err) require.NoError(t, err)
} }
@ -271,14 +271,14 @@ func TestMinIOCM(t *testing.T) {
for _, test := range removeTests { for _, test := range removeTests {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
k := path.Join(testRemoveRoot, test.removeKey) k := path.Join(testRemoveRoot, test.removeKey)
v, err := testCM.Read(k) v, err := testCM.Read(ctx, k)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, test.valueBeforeRemove, v) require.Equal(t, test.valueBeforeRemove, v)
err = testCM.Remove(k) err = testCM.Remove(ctx, k)
assert.NoError(t, err) assert.NoError(t, err)
v, err = testCM.Read(k) v, err = testCM.Read(ctx, k)
require.Error(t, err) require.Error(t, err)
require.Empty(t, v) require.Empty(t, v)
}) })
@ -290,15 +290,15 @@ func TestMinIOCM(t *testing.T) {
path.Join(testRemoveRoot, "mkey_3"), path.Join(testRemoveRoot, "mkey_3"),
} }
lv, err := testCM.MultiRead(multiRemoveTest) lv, err := testCM.MultiRead(ctx, multiRemoveTest)
require.NoError(t, err) require.NoError(t, err)
require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv) require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv)
err = testCM.MultiRemove(multiRemoveTest) err = testCM.MultiRemove(ctx, multiRemoveTest)
assert.NoError(t, err) assert.NoError(t, err)
for _, k := range multiRemoveTest { for _, k := range multiRemoveTest {
v, err := testCM.Read(k) v, err := testCM.Read(ctx, k)
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, v) assert.Empty(t, v)
} }
@ -310,15 +310,15 @@ func TestMinIOCM(t *testing.T) {
} }
removePrefix := path.Join(testRemoveRoot, "key_prefix") removePrefix := path.Join(testRemoveRoot, "key_prefix")
lv, err = testCM.MultiRead(removeWithPrefixTest) lv, err = testCM.MultiRead(ctx, removeWithPrefixTest)
require.NoError(t, err) require.NoError(t, err)
require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv) require.ElementsMatch(t, [][]byte{[]byte("111"), []byte("222"), []byte("333")}, lv)
err = testCM.RemoveWithPrefix(removePrefix) err = testCM.RemoveWithPrefix(ctx, removePrefix)
assert.NoError(t, err) assert.NoError(t, err)
for _, k := range removeWithPrefixTest { for _, k := range removeWithPrefixTest {
v, err := testCM.Read(k) v, err := testCM.Read(ctx, k)
assert.Error(t, err) assert.Error(t, err)
assert.Empty(t, v) assert.Empty(t, v)
} }
@ -332,44 +332,44 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testLoadPartialRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testLoadPartialRoot)
require.NoError(t, err) require.NoError(t, err)
defer testCM.RemoveWithPrefix(testLoadPartialRoot) defer testCM.RemoveWithPrefix(ctx, testLoadPartialRoot)
key := path.Join(testLoadPartialRoot, "TestMinIOKV_LoadPartial_key") key := path.Join(testLoadPartialRoot, "TestMinIOKV_LoadPartial_key")
value := []byte("TestMinIOKV_LoadPartial_value") value := []byte("TestMinIOKV_LoadPartial_value")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
var off, length int64 var off, length int64
var partial []byte var partial []byte
off, length = 1, 1 off, length = 1, 1
partial, err = testCM.ReadAt(key, off, length) partial, err = testCM.ReadAt(ctx, key, off, length)
assert.NoError(t, err) assert.NoError(t, err)
assert.ElementsMatch(t, partial, value[off:off+length]) assert.ElementsMatch(t, partial, value[off:off+length])
off, length = 0, int64(len(value)) off, length = 0, int64(len(value))
partial, err = testCM.ReadAt(key, off, length) partial, err = testCM.ReadAt(ctx, key, off, length)
assert.NoError(t, err) assert.NoError(t, err)
assert.ElementsMatch(t, partial, value[off:off+length]) assert.ElementsMatch(t, partial, value[off:off+length])
// error case // error case
off, length = 5, -2 off, length = 5, -2
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
off, length = -1, 2 off, length = -1, 2
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
off, length = 1, -2 off, length = 1, -2
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
err = testCM.Remove(key) err = testCM.Remove(ctx, key)
assert.NoError(t, err) assert.NoError(t, err)
off, length = 1, 1 off, length = 1, 1
_, err = testCM.ReadAt(key, off, length) _, err = testCM.ReadAt(ctx, key, off, length)
assert.Error(t, err) assert.Error(t, err)
}) })
@ -380,21 +380,21 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testGetSizeRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testGetSizeRoot)
require.NoError(t, err) require.NoError(t, err)
defer testCM.RemoveWithPrefix(testGetSizeRoot) defer testCM.RemoveWithPrefix(ctx, testGetSizeRoot)
key := path.Join(testGetSizeRoot, "TestMinIOKV_GetSize_key") key := path.Join(testGetSizeRoot, "TestMinIOKV_GetSize_key")
value := []byte("TestMinIOKV_GetSize_value") value := []byte("TestMinIOKV_GetSize_value")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
size, err := testCM.Size(key) size, err := testCM.Size(ctx, key)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, size, int64(len(value))) assert.Equal(t, size, int64(len(value)))
key2 := path.Join(testGetSizeRoot, "TestMemoryKV_GetSize_key2") key2 := path.Join(testGetSizeRoot, "TestMemoryKV_GetSize_key2")
size, err = testCM.Size(key2) size, err = testCM.Size(ctx, key2)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, int64(0), size) assert.Equal(t, int64(0), size)
}) })
@ -406,21 +406,21 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testGetPathRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testGetPathRoot)
require.NoError(t, err) require.NoError(t, err)
defer testCM.RemoveWithPrefix(testGetPathRoot) defer testCM.RemoveWithPrefix(ctx, testGetPathRoot)
key := path.Join(testGetPathRoot, "TestMinIOKV_GetSize_key") key := path.Join(testGetPathRoot, "TestMinIOKV_GetSize_key")
value := []byte("TestMinIOKV_GetSize_value") value := []byte("TestMinIOKV_GetSize_value")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
p, err := testCM.Path(key) p, err := testCM.Path(ctx, key)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, p, key) assert.Equal(t, p, key)
key2 := path.Join(testGetPathRoot, "TestMemoryKV_GetSize_key2") key2 := path.Join(testGetPathRoot, "TestMemoryKV_GetSize_key2")
p, err = testCM.Path(key2) p, err = testCM.Path(ctx, key2)
assert.Error(t, err) assert.Error(t, err)
assert.Equal(t, p, "") assert.Equal(t, p, "")
}) })
@ -432,15 +432,15 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testMmapRoot) testCM, err := newMinIOChunkManager(ctx, testBucket, testMmapRoot)
require.NoError(t, err) require.NoError(t, err)
defer testCM.RemoveWithPrefix(testMmapRoot) defer testCM.RemoveWithPrefix(ctx, testMmapRoot)
key := path.Join(testMmapRoot, "TestMinIOKV_GetSize_key") key := path.Join(testMmapRoot, "TestMinIOKV_GetSize_key")
value := []byte("TestMinIOKV_GetSize_value") value := []byte("TestMinIOKV_GetSize_value")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
r, err := testCM.Mmap(key) r, err := testCM.Mmap(ctx, key)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, r) assert.Nil(t, r)
@ -453,50 +453,50 @@ func TestMinIOCM(t *testing.T) {
testCM, err := newMinIOChunkManager(ctx, testBucket, testPrefix) testCM, err := newMinIOChunkManager(ctx, testBucket, testPrefix)
require.NoError(t, err) require.NoError(t, err)
defer testCM.RemoveWithPrefix(testPrefix) defer testCM.RemoveWithPrefix(ctx, testPrefix)
pathB := path.Join("a", "b") pathB := path.Join("a", "b")
key := path.Join(testPrefix, pathB) key := path.Join(testPrefix, pathB)
value := []byte("a") value := []byte("a")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
pathC := path.Join("a", "c") pathC := path.Join("a", "c")
key = path.Join(testPrefix, pathC) key = path.Join(testPrefix, pathC)
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
pathPrefix := path.Join(testPrefix, "a") pathPrefix := path.Join(testPrefix, "a")
r, m, err := testCM.ListWithPrefix(pathPrefix, true) r, m, err := testCM.ListWithPrefix(ctx, pathPrefix, true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(r), 2) assert.Equal(t, len(r), 2)
assert.Equal(t, len(m), 2) assert.Equal(t, len(m), 2)
key = path.Join(testPrefix, "b", "b", "b") key = path.Join(testPrefix, "b", "b", "b")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
key = path.Join(testPrefix, "b", "a", "b") key = path.Join(testPrefix, "b", "a", "b")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
key = path.Join(testPrefix, "bc", "a", "b") key = path.Join(testPrefix, "bc", "a", "b")
err = testCM.Write(key, value) err = testCM.Write(ctx, key, value)
assert.NoError(t, err) assert.NoError(t, err)
dirs, mods, err := testCM.ListWithPrefix(testPrefix+"/", true) dirs, mods, err := testCM.ListWithPrefix(ctx, testPrefix+"/", true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 5, len(dirs)) assert.Equal(t, 5, len(dirs))
assert.Equal(t, 5, len(mods)) assert.Equal(t, 5, len(mods))
dirs, mods, err = testCM.ListWithPrefix(path.Join(testPrefix, "b"), true) dirs, mods, err = testCM.ListWithPrefix(ctx, path.Join(testPrefix, "b"), true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 3, len(dirs)) assert.Equal(t, 3, len(dirs))
assert.Equal(t, 3, len(mods)) assert.Equal(t, 3, len(mods))
testCM.RemoveWithPrefix(testPrefix) testCM.RemoveWithPrefix(ctx, testPrefix)
r, m, err = testCM.ListWithPrefix(pathPrefix, true) r, m, err = testCM.ListWithPrefix(ctx, pathPrefix, true)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 0, len(r)) assert.Equal(t, 0, len(r))
assert.Equal(t, 0, len(m)) assert.Equal(t, 0, len(m))
@ -504,7 +504,7 @@ func TestMinIOCM(t *testing.T) {
// test wrong prefix // test wrong prefix
b := make([]byte, 2048) b := make([]byte, 2048)
pathWrong := path.Join(testPrefix, string(b)) pathWrong := path.Join(testPrefix, string(b))
_, _, err = testCM.ListWithPrefix(pathWrong, true) _, _, err = testCM.ListWithPrefix(ctx, pathWrong, true)
assert.Error(t, err) assert.Error(t, err)
}) })
} }
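
With ctx threaded through, a caller-side timeout or cancellation now reaches the underlying object-store request. A minimal sketch of the new calling convention, assuming the newMinIOChunkManager helper and testBucket defined earlier in this test file ("example-root" is a hypothetical root path):

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cm, err := newMinIOChunkManager(ctx, testBucket, "example-root")
if err != nil {
	panic(err)
}
if err := cm.Write(ctx, "example-root/key", []byte("value")); err != nil {
	panic(err)
}
got, err := cm.Read(ctx, "example-root/key") // cancelling ctx is expected to abort the request
_ = got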

View File

@ -12,6 +12,7 @@
package storage package storage
import ( import (
"context"
"io" "io"
"time" "time"
@ -29,33 +30,33 @@ type ChunkManager interface {
// RootPath returns current root path. // RootPath returns current root path.
RootPath() string RootPath() string
// Path returns path of @filePath. // Path returns path of @filePath.
Path(filePath string) (string, error) Path(ctx context.Context, filePath string) (string, error)
// Size returns the size of @filePath. // Size returns the size of @filePath.
Size(filePath string) (int64, error) Size(ctx context.Context, filePath string) (int64, error)
// Write writes @content to @filePath. // Write writes @content to @filePath.
Write(filePath string, content []byte) error Write(ctx context.Context, filePath string, content []byte) error
// MultiWrite writes every entry of @contents to its corresponding path. // MultiWrite writes every entry of @contents to its corresponding path.
MultiWrite(contents map[string][]byte) error MultiWrite(ctx context.Context, contents map[string][]byte) error
// Exist returns true if @filePath exists. // Exist returns true if @filePath exists.
Exist(filePath string) (bool, error) Exist(ctx context.Context, filePath string) (bool, error)
// Read reads @filePath and returns content. // Read reads @filePath and returns content.
Read(filePath string) ([]byte, error) Read(ctx context.Context, filePath string) ([]byte, error)
// Reader returns a reader for @filePath. // Reader returns a reader for @filePath.
Reader(filePath string) (FileReader, error) Reader(ctx context.Context, filePath string) (FileReader, error)
// MultiRead reads all @filePaths and returns their contents. // MultiRead reads all @filePaths and returns their contents.
MultiRead(filePaths []string) ([][]byte, error) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error)
ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error)
// ReadWithPrefix reads files with the same @prefix and returns their contents. // ReadWithPrefix reads files with the same @prefix and returns their contents.
ReadWithPrefix(prefix string) ([]string, [][]byte, error) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error)
Mmap(filePath string) (*mmap.ReaderAt, error) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error)
// ReadAt reads @length bytes of @filePath starting at offset @off and returns the data in @p. // ReadAt reads @length bytes of @filePath starting at offset @off and returns the data in @p.
// if all bytes are read, @err is io.EOF. // if all bytes are read, @err is io.EOF.
// any other error means the read failed. // any other error means the read failed.
ReadAt(filePath string, off int64, length int64) (p []byte, err error) ReadAt(ctx context.Context, filePath string, off int64, length int64) (p []byte, err error)
// Remove deletes @filePath. // Remove deletes @filePath.
Remove(filePath string) error Remove(ctx context.Context, filePath string) error
// MultiRemove deletes @filePaths. // MultiRemove deletes @filePaths.
MultiRemove(filePaths []string) error MultiRemove(ctx context.Context, filePaths []string) error
// RemoveWithPrefix removes files with the same @prefix. // RemoveWithPrefix removes files with the same @prefix.
RemoveWithPrefix(prefix string) error RemoveWithPrefix(ctx context.Context, prefix string) error
} }
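
Every implementation now receives the caller's context as its first argument, so deadlines and cancellation compose in the usual Go way. A minimal caller-side sketch against the interface above; readIfExists is a hypothetical helper, not part of this commit, and assumes only the standard context and time packages:

// readIfExists checks existence and reads under a single 5s deadline,
// returning nil content when the file is absent.
func readIfExists(ctx context.Context, cm ChunkManager, filePath string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	ok, err := cm.Exist(ctx, filePath)
	if err != nil || !ok {
		return nil, err
	}
	return cm.Read(ctx, filePath)
}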

View File

@ -17,6 +17,7 @@
package storage package storage
import ( import (
"context"
"errors" "errors"
"io" "io"
"sync" "sync"
@ -53,7 +54,7 @@ type VectorChunkManager struct {
var _ ChunkManager = (*VectorChunkManager)(nil) var _ ChunkManager = (*VectorChunkManager)(nil)
// NewVectorChunkManager creates a new vector manager object. // NewVectorChunkManager creates a new vector manager object.
func NewVectorChunkManager(cacheStorage ChunkManager, vectorStorage ChunkManager, schema *etcdpb.CollectionMeta, cacheLimit int64, cacheEnable bool) (*VectorChunkManager, error) { func NewVectorChunkManager(ctx context.Context, cacheStorage ChunkManager, vectorStorage ChunkManager, schema *etcdpb.CollectionMeta, cacheLimit int64, cacheEnable bool) (*VectorChunkManager, error) {
insertCodec := NewInsertCodec(schema) insertCodec := NewInsertCodec(schema)
vcm := &VectorChunkManager{ vcm := &VectorChunkManager{
cacheStorage: cacheStorage, cacheStorage: cacheStorage,
@ -74,7 +75,7 @@ func NewVectorChunkManager(cacheStorage ChunkManager, vectorStorage ChunkManager
if err != nil { if err != nil {
log.Error("Unmmap file failed", zap.Any("file", k)) log.Error("Unmmap file failed", zap.Any("file", k))
} }
err = cacheStorage.Remove(k.(string)) err = cacheStorage.Remove(ctx, k.(string))
if err != nil { if err != nil {
log.Error("cache storage remove file failed", zap.Any("file", k)) log.Error("cache storage remove file failed", zap.Any("file", k))
} }
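
Note that the LRU eviction callback above captures the constructor's ctx, so cacheStorage.Remove calls triggered by later evictions run under that context. A minimal construction sketch, assuming lcm and rcm are existing local and remote ChunkManager instances and meta is a collection schema, as in the tests further down:

ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancellation also reaches Remove calls made by cache eviction
vcm, err := NewVectorChunkManager(ctx, lcm, rcm, meta, 16, true)
if err != nil {
	panic(err)
}
defer vcm.Close()

Tying a long-lived manager to a cancellable ctx means evictions start failing once that ctx is done, which is worth weighing when choosing the constructor's context.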
@ -123,31 +124,31 @@ func (vcm *VectorChunkManager) RootPath() string {
// Path returns the path of the vector data. If cached, it returns the local path; // Path returns the path of the vector data. If cached, it returns the local path;
// otherwise it returns the remote path. // otherwise it returns the remote path.
func (vcm *VectorChunkManager) Path(filePath string) (string, error) { func (vcm *VectorChunkManager) Path(ctx context.Context, filePath string) (string, error) {
return vcm.vectorStorage.Path(filePath) return vcm.vectorStorage.Path(ctx, filePath)
} }
func (vcm *VectorChunkManager) Size(filePath string) (int64, error) { func (vcm *VectorChunkManager) Size(ctx context.Context, filePath string) (int64, error) {
return vcm.vectorStorage.Size(filePath) return vcm.vectorStorage.Size(ctx, filePath)
} }
// Write writes the vector data to the vector storage. // Write writes the vector data to the vector storage.
func (vcm *VectorChunkManager) Write(filePath string, content []byte) error { func (vcm *VectorChunkManager) Write(ctx context.Context, filePath string, content []byte) error {
return vcm.vectorStorage.Write(filePath, content) return vcm.vectorStorage.Write(ctx, filePath, content)
} }
// MultiWrite writes the vector data entries to the vector storage. // MultiWrite writes the vector data entries to the vector storage.
func (vcm *VectorChunkManager) MultiWrite(contents map[string][]byte) error { func (vcm *VectorChunkManager) MultiWrite(ctx context.Context, contents map[string][]byte) error {
return vcm.vectorStorage.MultiWrite(contents) return vcm.vectorStorage.MultiWrite(ctx, contents)
} }
// Exist checks whether the vector data exists in the vector storage. // Exist checks whether the vector data exists in the vector storage.
func (vcm *VectorChunkManager) Exist(filePath string) (bool, error) { func (vcm *VectorChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {
return vcm.vectorStorage.Exist(filePath) return vcm.vectorStorage.Exist(ctx, filePath)
} }
func (vcm *VectorChunkManager) readWithCache(filePath string) ([]byte, error) { func (vcm *VectorChunkManager) readWithCache(ctx context.Context, filePath string) ([]byte, error) {
contents, err := vcm.vectorStorage.Read(filePath) contents, err := vcm.vectorStorage.Read(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -155,15 +156,15 @@ func (vcm *VectorChunkManager) readWithCache(filePath string) ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = vcm.cacheStorage.Write(filePath, results) err = vcm.cacheStorage.Write(ctx, filePath, results)
if err != nil { if err != nil {
return nil, err return nil, err
} }
r, err := vcm.cacheStorage.Mmap(filePath) r, err := vcm.cacheStorage.Mmap(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
size, err := vcm.cacheStorage.Size(filePath) size, err := vcm.cacheStorage.Size(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -187,7 +188,7 @@ func (vcm *VectorChunkManager) readWithCache(filePath string) ([]byte, error) {
} }
// Read reads the pure vector data. If cached, it reads from local. // Read reads the pure vector data. If cached, it reads from local.
func (vcm *VectorChunkManager) Read(filePath string) ([]byte, error) { func (vcm *VectorChunkManager) Read(ctx context.Context, filePath string) ([]byte, error) {
if vcm.cacheEnable { if vcm.cacheEnable {
if r, ok := vcm.cache.Get(filePath); ok { if r, ok := vcm.cache.Get(filePath); ok {
at := r.(*mmap.ReaderAt) at := r.(*mmap.ReaderAt)
@ -198,9 +199,9 @@ func (vcm *VectorChunkManager) Read(filePath string) ([]byte, error) {
} }
return p, nil return p, nil
} }
return vcm.readWithCache(filePath) return vcm.readWithCache(ctx, filePath)
} }
contents, err := vcm.vectorStorage.Read(filePath) contents, err := vcm.vectorStorage.Read(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
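
A cache hit in Read is served from the local mmap without consulting ctx; only a cache miss (readWithCache) or the non-cached path issues a remote read that honors cancellation. A minimal sketch of the observable difference, assuming vcm was built with caching enabled and key names an existing file:

data, err := vcm.Read(ctx, key) // first read goes remote and fills the cache
if err != nil {
	panic(err)
}
_ = data
cancelledCtx, cancel := context.WithCancel(context.Background())
cancel()
// a cache hit is served from the local mmap, so the already-cancelled
// context is never inspected here; a cache miss would fail instead
data, _ = vcm.Read(cancelledCtx, key)
_ = data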
@ -208,10 +209,10 @@ func (vcm *VectorChunkManager) Read(filePath string) ([]byte, error) {
} }
// MultiRead reads the pure vector data. If cached, it reads from local. // MultiRead reads the pure vector data. If cached, it reads from local.
func (vcm *VectorChunkManager) MultiRead(filePaths []string) ([][]byte, error) { func (vcm *VectorChunkManager) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error) {
results := make([][]byte, len(filePaths)) results := make([][]byte, len(filePaths))
for i, filePath := range filePaths { for i, filePath := range filePaths {
content, err := vcm.Read(filePath) content, err := vcm.Read(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -221,23 +222,23 @@ func (vcm *VectorChunkManager) MultiRead(filePaths []string) ([][]byte, error) {
return results, nil return results, nil
} }
func (vcm *VectorChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error) { func (vcm *VectorChunkManager) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
filePaths, _, err := vcm.ListWithPrefix(prefix, true) filePaths, _, err := vcm.ListWithPrefix(ctx, prefix, true)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
results, err := vcm.MultiRead(filePaths) results, err := vcm.MultiRead(ctx, filePaths)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
return filePaths, results, nil return filePaths, results, nil
} }
func (vcm *VectorChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) { func (vcm *VectorChunkManager) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
return vcm.vectorStorage.ListWithPrefix(prefix, recursive) return vcm.vectorStorage.ListWithPrefix(ctx, prefix, recursive)
} }
func (vcm *VectorChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) { func (vcm *VectorChunkManager) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error) {
if vcm.cacheEnable && vcm.cache != nil { if vcm.cacheEnable && vcm.cache != nil {
if r, ok := vcm.cache.Get(filePath); ok { if r, ok := vcm.cache.Get(filePath); ok {
return r.(*mmap.ReaderAt), nil return r.(*mmap.ReaderAt), nil
@ -246,12 +247,12 @@ func (vcm *VectorChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) {
return nil, errors.New("the file mmap has not been cached") return nil, errors.New("the file mmap has not been cached")
} }
func (vcm *VectorChunkManager) Reader(filePath string) (FileReader, error) { func (vcm *VectorChunkManager) Reader(ctx context.Context, filePath string) (FileReader, error) {
return nil, errors.New("this method has not been implemented") return nil, errors.New("this method has not been implemented")
} }
// ReadAt reads the vector data at the given offset. If cached, it reads from the local cache. // ReadAt reads the vector data at the given offset. If cached, it reads from the local cache.
func (vcm *VectorChunkManager) ReadAt(filePath string, off int64, length int64) ([]byte, error) { func (vcm *VectorChunkManager) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {
if vcm.cacheEnable { if vcm.cacheEnable {
if r, ok := vcm.cache.Get(filePath); ok { if r, ok := vcm.cache.Get(filePath); ok {
at := r.(*mmap.ReaderAt) at := r.(*mmap.ReaderAt)
@ -262,13 +263,13 @@ func (vcm *VectorChunkManager) ReadAt(filePath string, off int64, length int64)
} }
return p, nil return p, nil
} }
results, err := vcm.readWithCache(filePath) results, err := vcm.readWithCache(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return results[off : off+length], nil return results[off : off+length], nil
} }
contents, err := vcm.vectorStorage.Read(filePath) contents, err := vcm.vectorStorage.Read(ctx, filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -288,8 +289,8 @@ func (vcm *VectorChunkManager) ReadAt(filePath string, off int64, length int64)
} }
return p, nil return p, nil
} }
func (vcm *VectorChunkManager) Remove(filePath string) error { func (vcm *VectorChunkManager) Remove(ctx context.Context, filePath string) error {
err := vcm.vectorStorage.Remove(filePath) err := vcm.vectorStorage.Remove(ctx, filePath)
if err != nil { if err != nil {
return err return err
} }
@ -299,8 +300,8 @@ func (vcm *VectorChunkManager) Remove(filePath string) error {
return nil return nil
} }
func (vcm *VectorChunkManager) MultiRemove(filePaths []string) error { func (vcm *VectorChunkManager) MultiRemove(ctx context.Context, filePaths []string) error {
err := vcm.vectorStorage.MultiRemove(filePaths) err := vcm.vectorStorage.MultiRemove(ctx, filePaths)
if err != nil { if err != nil {
return err return err
} }
@ -312,13 +313,13 @@ func (vcm *VectorChunkManager) MultiRemove(filePaths []string) error {
return nil return nil
} }
func (vcm *VectorChunkManager) RemoveWithPrefix(prefix string) error { func (vcm *VectorChunkManager) RemoveWithPrefix(ctx context.Context, prefix string) error {
err := vcm.vectorStorage.RemoveWithPrefix(prefix) err := vcm.vectorStorage.RemoveWithPrefix(ctx, prefix)
if err != nil { if err != nil {
return err return err
} }
if vcm.cacheEnable { if vcm.cacheEnable {
filePaths, _, err := vcm.ListWithPrefix(prefix, true) filePaths, _, err := vcm.ListWithPrefix(ctx, prefix, true)
if err != nil { if err != nil {
return err return err
} }
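
RemoveWithPrefix now lists and removes under the same ctx, so a single deadline bounds the whole sweep. A minimal caller sketch with a hypothetical prefix:

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// the ListWithPrefix lookup and the removals all run under one deadline
if err := vcm.RemoveWithPrefix(ctx, "files/segment_42/"); err != nil {
	panic(err) // a deadline hit surfaces here as a context error
}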

View File

@ -130,7 +130,7 @@ func buildVectorChunkManager(localPath string, localCacheEnable bool) (*VectorCh
lcm := NewLocalChunkManager(RootPath(localPath)) lcm := NewLocalChunkManager(RootPath(localPath))
meta := initMeta() meta := initMeta()
vcm, err := NewVectorChunkManager(lcm, rcm, meta, 16, localCacheEnable) vcm, err := NewVectorChunkManager(ctx, lcm, rcm, meta, 16, localCacheEnable)
if err != nil { if err != nil {
return nil, cancel, err return nil, cancel, err
} }
@ -161,17 +161,19 @@ func TestNewVectorChunkManager(t *testing.T) {
lcm := NewLocalChunkManager(RootPath(localPath)) lcm := NewLocalChunkManager(RootPath(localPath))
meta := initMeta() meta := initMeta()
vcm, err := NewVectorChunkManager(lcm, rcm, meta, 16, true) vcm, err := NewVectorChunkManager(ctx, lcm, rcm, meta, 16, true)
assert.Equal(t, "", vcm.RootPath()) assert.Equal(t, "", vcm.RootPath())
assert.Nil(t, err) assert.Nil(t, err)
assert.NotNil(t, vcm) assert.NotNil(t, vcm)
vcm, err = NewVectorChunkManager(lcm, rcm, meta, -1, true) vcm, err = NewVectorChunkManager(ctx, lcm, rcm, meta, -1, true)
assert.NotNil(t, err) assert.NotNil(t, err)
assert.Nil(t, vcm) assert.Nil(t, vcm)
} }
func TestVectorChunkManager_GetPath(t *testing.T) { func TestVectorChunkManager_GetPath(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
localCaches := []bool{true, false} localCaches := []bool{true, false}
for _, localCache := range localCaches { for _, localCache := range localCaches {
vcm, cancel, err := buildVectorChunkManager(localPath, localCache) vcm, cancel, err := buildVectorChunkManager(localPath, localCache)
@ -179,19 +181,19 @@ func TestVectorChunkManager_GetPath(t *testing.T) {
assert.NotNil(t, vcm) assert.NotNil(t, vcm)
key := "1" key := "1"
err = vcm.Write(key, []byte{1}) err = vcm.Write(ctx, key, []byte{1})
assert.Nil(t, err) assert.Nil(t, err)
pathGet, err := vcm.Path(key) pathGet, err := vcm.Path(ctx, key)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, pathGet, key) assert.Equal(t, pathGet, key)
err = vcm.cacheStorage.Write(key, []byte{1}) err = vcm.cacheStorage.Write(ctx, key, []byte{1})
assert.Nil(t, err) assert.Nil(t, err)
pathGet, err = vcm.Path(key) pathGet, err = vcm.Path(ctx, key)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, pathGet, key) assert.Equal(t, pathGet, key)
err = vcm.RemoveWithPrefix(localPath) err = vcm.RemoveWithPrefix(ctx, localPath)
assert.NoError(t, err) assert.NoError(t, err)
cancel() cancel()
vcm.Close() vcm.Close()
@ -199,6 +201,8 @@ func TestVectorChunkManager_GetPath(t *testing.T) {
} }
func TestVectorChunkManager_GetSize(t *testing.T) { func TestVectorChunkManager_GetSize(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
localCaches := []bool{true, false} localCaches := []bool{true, false}
for _, localCache := range localCaches { for _, localCache := range localCaches {
vcm, cancel, err := buildVectorChunkManager(localPath, localCache) vcm, cancel, err := buildVectorChunkManager(localPath, localCache)
@ -206,19 +210,19 @@ func TestVectorChunkManager_GetSize(t *testing.T) {
assert.NotNil(t, vcm) assert.NotNil(t, vcm)
key := "1" key := "1"
err = vcm.Write(key, []byte{1}) err = vcm.Write(ctx, key, []byte{1})
assert.Nil(t, err) assert.Nil(t, err)
sizeGet, err := vcm.Size(key) sizeGet, err := vcm.Size(ctx, key)
assert.Nil(t, err) assert.Nil(t, err)
assert.EqualValues(t, sizeGet, 1) assert.EqualValues(t, sizeGet, 1)
err = vcm.cacheStorage.Write(key, []byte{1}) err = vcm.cacheStorage.Write(ctx, key, []byte{1})
assert.Nil(t, err) assert.Nil(t, err)
sizeGet, err = vcm.Size(key) sizeGet, err = vcm.Size(ctx, key)
assert.Nil(t, err) assert.Nil(t, err)
assert.EqualValues(t, sizeGet, 1) assert.EqualValues(t, sizeGet, 1)
err = vcm.RemoveWithPrefix(localPath) err = vcm.RemoveWithPrefix(ctx, localPath)
assert.NoError(t, err) assert.NoError(t, err)
cancel() cancel()
vcm.Close() vcm.Close()
@ -226,6 +230,9 @@ func TestVectorChunkManager_GetSize(t *testing.T) {
} }
func TestVectorChunkManager_Write(t *testing.T) { func TestVectorChunkManager_Write(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
localCaches := []bool{true, false} localCaches := []bool{true, false}
for _, localCache := range localCaches { for _, localCache := range localCaches {
vcm, cancel, err := buildVectorChunkManager(localPath, localCache) vcm, cancel, err := buildVectorChunkManager(localPath, localCache)
@ -233,10 +240,10 @@ func TestVectorChunkManager_Write(t *testing.T) {
assert.NotNil(t, vcm) assert.NotNil(t, vcm)
key := "1" key := "1"
err = vcm.Write(key, []byte{1}) err = vcm.Write(ctx, key, []byte{1})
assert.Nil(t, err) assert.Nil(t, err)
exist, err := vcm.Exist(key) exist, err := vcm.Exist(ctx, key)
assert.True(t, exist) assert.True(t, exist)
assert.NoError(t, err) assert.NoError(t, err)
@ -244,17 +251,17 @@ func TestVectorChunkManager_Write(t *testing.T) {
"key_1": {111}, "key_1": {111},
"key_2": {222}, "key_2": {222},
} }
err = vcm.MultiWrite(contents) err = vcm.MultiWrite(ctx, contents)
assert.NoError(t, err) assert.NoError(t, err)
exist, err = vcm.Exist("key_1") exist, err = vcm.Exist(ctx, "key_1")
assert.True(t, exist) assert.True(t, exist)
assert.NoError(t, err) assert.NoError(t, err)
exist, err = vcm.Exist("key_2") exist, err = vcm.Exist(ctx, "key_2")
assert.True(t, exist) assert.True(t, exist)
assert.NoError(t, err) assert.NoError(t, err)
err = vcm.RemoveWithPrefix(localPath) err = vcm.RemoveWithPrefix(ctx, localPath)
assert.NoError(t, err) assert.NoError(t, err)
cancel() cancel()
vcm.Close() vcm.Close()
@ -262,6 +269,9 @@ func TestVectorChunkManager_Write(t *testing.T) {
} }
func TestVectorChunkManager_Remove(t *testing.T) { func TestVectorChunkManager_Remove(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
localCaches := []bool{true, false} localCaches := []bool{true, false}
for _, localCache := range localCaches { for _, localCache := range localCaches {
vcm, cancel, err := buildVectorChunkManager(localPath, localCache) vcm, cancel, err := buildVectorChunkManager(localPath, localCache)
@ -269,13 +279,13 @@ func TestVectorChunkManager_Remove(t *testing.T) {
assert.NotNil(t, vcm) assert.NotNil(t, vcm)
key := "1" key := "1"
err = vcm.cacheStorage.Write(key, []byte{1}) err = vcm.cacheStorage.Write(ctx, key, []byte{1})
assert.Nil(t, err) assert.Nil(t, err)
err = vcm.Remove(key) err = vcm.Remove(ctx, key)
assert.Nil(t, err) assert.Nil(t, err)
exist, err := vcm.Exist(key) exist, err := vcm.Exist(ctx, key)
assert.False(t, exist) assert.False(t, exist)
assert.NoError(t, err) assert.NoError(t, err)
@ -283,20 +293,20 @@ func TestVectorChunkManager_Remove(t *testing.T) {
"key_1": {111}, "key_1": {111},
"key_2": {222}, "key_2": {222},
} }
err = vcm.cacheStorage.MultiWrite(contents) err = vcm.cacheStorage.MultiWrite(ctx, contents)
assert.NoError(t, err) assert.NoError(t, err)
err = vcm.MultiRemove([]string{"key_1", "key_2"}) err = vcm.MultiRemove(ctx, []string{"key_1", "key_2"})
assert.NoError(t, err) assert.NoError(t, err)
exist, err = vcm.Exist("key_1") exist, err = vcm.Exist(ctx, "key_1")
assert.False(t, exist) assert.False(t, exist)
assert.NoError(t, err) assert.NoError(t, err)
exist, err = vcm.Exist("key_2") exist, err = vcm.Exist(ctx, "key_2")
assert.False(t, exist) assert.False(t, exist)
assert.NoError(t, err) assert.NoError(t, err)
err = vcm.RemoveWithPrefix(localPath) err = vcm.RemoveWithPrefix(ctx, localPath)
assert.NoError(t, err) assert.NoError(t, err)
cancel() cancel()
vcm.Close() vcm.Close()
@ -308,20 +318,20 @@ type mockFailedChunkManager struct {
ChunkManager ChunkManager
} }
func (m *mockFailedChunkManager) Remove(key string) error { func (m *mockFailedChunkManager) Remove(ctx context.Context, key string) error {
if m.fail { if m.fail {
return errors.New("remove error") return errors.New("remove error")
} }
return nil return nil
} }
func (m *mockFailedChunkManager) RemoveWithPrefix(prefix string) error { func (m *mockFailedChunkManager) RemoveWithPrefix(ctx context.Context, prefix string) error {
if m.fail { if m.fail {
return errors.New("remove with prefix error") return errors.New("remove with prefix error")
} }
return nil return nil
} }
func (m *mockFailedChunkManager) MultiRemove(key []string) error { func (m *mockFailedChunkManager) MultiRemove(ctx context.Context, key []string) error {
if m.fail { if m.fail {
return errors.New("multi remove error") return errors.New("multi remove error")
} }
@ -329,23 +339,29 @@ func (m *mockFailedChunkManager) MultiRemove(key []string) error {
} }
func TestVectorChunkManager_Remove_Fail(t *testing.T) { func TestVectorChunkManager_Remove_Fail(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vcm := &VectorChunkManager{ vcm := &VectorChunkManager{
vectorStorage: &mockFailedChunkManager{fail: true}, vectorStorage: &mockFailedChunkManager{fail: true},
cacheStorage: &mockFailedChunkManager{fail: true}, cacheStorage: &mockFailedChunkManager{fail: true},
} }
assert.Error(t, vcm.Remove("test")) assert.Error(t, vcm.Remove(ctx, "test"))
assert.Error(t, vcm.MultiRemove([]string{"test"})) assert.Error(t, vcm.MultiRemove(ctx, []string{"test"}))
assert.Error(t, vcm.RemoveWithPrefix("test")) assert.Error(t, vcm.RemoveWithPrefix(ctx, "test"))
} }
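
Embedding ChunkManager lets a mock track the widened signatures while overriding only the methods under test. The same trick for a different method, sketched hypothetically and not part of this commit:

// mockExistChunkManager embeds ChunkManager so any method a test reaches
// unexpectedly panics on the nil embedded value, keeping the mock honest.
type mockExistChunkManager struct {
	ChunkManager
	exists bool
}

func (m *mockExistChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {
	return m.exists, nil
}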
func TestVectorChunkManager_Read(t *testing.T) { func TestVectorChunkManager_Read(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
localCaches := []bool{true, false} localCaches := []bool{true, false}
for _, localCache := range localCaches { for _, localCache := range localCaches {
vcm, cancel, err := buildVectorChunkManager(localPath, localCache) vcm, cancel, err := buildVectorChunkManager(localPath, localCache)
assert.NotNil(t, vcm) assert.NotNil(t, vcm)
assert.NoError(t, err) assert.NoError(t, err)
content, err := vcm.Read("9999") content, err := vcm.Read(ctx, "9999")
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, content) assert.Nil(t, content)
@ -353,15 +369,15 @@ func TestVectorChunkManager_Read(t *testing.T) {
binlogs := initBinlogFile(meta) binlogs := initBinlogFile(meta)
assert.NotNil(t, binlogs) assert.NotNil(t, binlogs)
for _, binlog := range binlogs { for _, binlog := range binlogs {
err := vcm.vectorStorage.Write(binlog.Key, binlog.Value) err := vcm.vectorStorage.Write(ctx, binlog.Key, binlog.Value)
assert.Nil(t, err) assert.Nil(t, err)
} }
content, err = vcm.Read("108") content, err = vcm.Read(ctx, "108")
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, []byte{0, 255}, content) assert.Equal(t, []byte{0, 255}, content)
content, err = vcm.Read("109") content, err = vcm.Read(ctx, "109")
assert.Nil(t, err) assert.Nil(t, err)
floatResult := make([]float32, 0) floatResult := make([]float32, 0)
for i := 0; i < len(content)/4; i++ { for i := 0; i < len(content)/4; i++ {
@ -370,7 +386,7 @@ func TestVectorChunkManager_Read(t *testing.T) {
} }
assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult) assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
contents, err := vcm.MultiRead([]string{"108", "109"}) contents, err := vcm.MultiRead(ctx, []string{"108", "109"})
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, []byte{0, 255}, contents[0]) assert.Equal(t, []byte{0, 255}, contents[0])
@ -381,7 +397,7 @@ func TestVectorChunkManager_Read(t *testing.T) {
} }
assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult) assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
keys, contents, err := vcm.ReadWithPrefix("10") keys, contents, err := vcm.ReadWithPrefix(ctx, "10")
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, "101", keys[0]) assert.Equal(t, "101", keys[0])
assert.Equal(t, []byte{3, 4}, contents[0]) assert.Equal(t, []byte{3, 4}, contents[0])
@ -398,7 +414,7 @@ func TestVectorChunkManager_Read(t *testing.T) {
assert.Equal(t, "109", keys[2]) assert.Equal(t, "109", keys[2])
assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult) assert.Equal(t, []float32{0, 1, 2, 3, 4, 5, 6, 7, 0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
content, err = vcm.ReadAt("109", 8*4, 8*4) content, err = vcm.ReadAt(ctx, "109", 8*4, 8*4)
assert.Nil(t, err) assert.Nil(t, err)
floatResult = make([]float32, 0) floatResult = make([]float32, 0)
@ -408,36 +424,36 @@ func TestVectorChunkManager_Read(t *testing.T) {
} }
assert.Equal(t, []float32{0, 111, 222, 333, 444, 555, 777, 666}, floatResult) assert.Equal(t, []float32{0, 111, 222, 333, 444, 555, 777, 666}, floatResult)
content, err = vcm.ReadAt("9999", 0, 8*4) content, err = vcm.ReadAt(ctx, "9999", 0, 8*4)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, content) assert.Nil(t, content)
content, err = vcm.ReadAt("109", 8*4, 8*4) content, err = vcm.ReadAt(ctx, "109", 8*4, 8*4)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 32, len(content)) assert.Equal(t, 32, len(content))
if localCache { if localCache {
r, err := vcm.Mmap("109") r, err := vcm.Mmap(ctx, "109")
assert.Nil(t, err) assert.Nil(t, err)
p := make([]byte, 32) p := make([]byte, 32)
n, err := r.ReadAt(p, 32) n, err := r.ReadAt(p, 32)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, n, 32) assert.Equal(t, n, 32)
r, err = vcm.Mmap("not exist") r, err = vcm.Mmap(ctx, "not exist")
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, r) assert.Nil(t, r)
} }
content, err = vcm.ReadAt("109", 9999, 8*4) content, err = vcm.ReadAt(ctx, "109", 9999, 8*4)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, content) assert.Nil(t, content)
content, err = vcm.ReadAt("9999", 0, 8*4) content, err = vcm.ReadAt(ctx, "9999", 0, 8*4)
assert.Error(t, err) assert.Error(t, err)
assert.Nil(t, content) assert.Nil(t, content)
err = vcm.RemoveWithPrefix(localPath) err = vcm.RemoveWithPrefix(ctx, localPath)
assert.NoError(t, err) assert.NoError(t, err)
cancel() cancel()
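
Mmap is served only from the local cache, so with localCache disabled (or before the first read) it errors, as the branch above shows. A minimal fallback sketch, assuming vcm has caching enabled and ctx comes from the surrounding test:

r, err := vcm.Mmap(ctx, "109")
if err != nil {
	// not cached yet (or cache disabled): fall back to a normal read
	_, _ = vcm.Read(ctx, "109")
} else {
	buf := make([]byte, 32)
	_, _ = r.ReadAt(buf, 32) // random access without another remote fetch
}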

View File

@ -140,7 +140,8 @@ func (p *ImportWrapper) fileValidation(filePaths []string, rowBased bool) error
} }
// check file size // check file size
size, _ := p.chunkManager.Size(filePath) // TODO add context
size, _ := p.chunkManager.Size(context.TODO(), filePath)
if size == 0 { if size == 0 {
return errors.New("the file " + filePath + " is empty") return errors.New("the file " + filePath + " is empty")
} }
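
context.TODO() marks a call site that has not had a real context threaded through yet. A hypothetical follow-up, not part of this commit, would widen fileValidation itself; only the size check is sketched here:

// hypothetical signature change, for illustration only
func (p *ImportWrapper) fileValidation(ctx context.Context, filePaths []string, rowBased bool) error {
	for _, filePath := range filePaths {
		size, _ := p.chunkManager.Size(ctx, filePath)
		if size == 0 {
			return errors.New("the file " + filePath + " is empty")
		}
	}
	return nil
}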
@ -271,9 +272,12 @@ func (p *ImportWrapper) Import(filePaths []string, rowBased bool, onlyValidate b
func (p *ImportWrapper) parseRowBasedJSON(filePath string, onlyValidate bool) error { func (p *ImportWrapper) parseRowBasedJSON(filePath string, onlyValidate bool) error {
tr := timerecord.NewTimeRecorder("json row-based parser: " + filePath) tr := timerecord.NewTimeRecorder("json row-based parser: " + filePath)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// for minio storage, chunkManager will download file into local memory // for minio storage, chunkManager will download file into local memory
// for local storage, chunkManager open the file directly // for local storage, chunkManager open the file directly
file, err := p.chunkManager.Reader(filePath) file, err := p.chunkManager.Reader(ctx, filePath)
if err != nil { if err != nil {
return err return err
} }
@ -317,9 +321,12 @@ func (p *ImportWrapper) parseColumnBasedJSON(filePath string, onlyValidate bool,
combineFunc func(fields map[storage.FieldID]storage.FieldData) error) error { combineFunc func(fields map[storage.FieldID]storage.FieldData) error) error {
tr := timerecord.NewTimeRecorder("json column-based parser: " + filePath) tr := timerecord.NewTimeRecorder("json column-based parser: " + filePath)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// for minio storage, chunkManager will download file into local memory // for minio storage, chunkManager will download file into local memory
// for local storage, chunkManager open the file directly // for local storage, chunkManager open the file directly
file, err := p.chunkManager.Reader(filePath) file, err := p.chunkManager.Reader(ctx, filePath)
if err != nil { if err != nil {
return err return err
} }
@ -353,11 +360,13 @@ func (p *ImportWrapper) parseColumnBasedNumpy(filePath string, onlyValidate bool
combineFunc func(fields map[storage.FieldID]storage.FieldData) error) error { combineFunc func(fields map[storage.FieldID]storage.FieldData) error) error {
tr := timerecord.NewTimeRecorder("numpy parser: " + filePath) tr := timerecord.NewTimeRecorder("numpy parser: " + filePath)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fileName, _ := getFileNameAndExt(filePath) fileName, _ := getFileNameAndExt(filePath)
// for minio storage, chunkManager will download file into local memory // for minio storage, chunkManager will download file into local memory
// for local storage, chunkManager open the file directly // for local storage, chunkManager open the file directly
file, err := p.chunkManager.Reader(filePath) file, err := p.chunkManager.Reader(ctx, filePath)
if err != nil { if err != nil {
return err return err
} }
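
Each parser above creates its own cancellable ctx right before opening the reader, so abandoning the parse releases any in-flight download. The pattern in isolation, as a sketch inside a parser function, assuming cm is a ChunkManager and FileReader is closable as the import parsers treat it:

ctx, cancel := context.WithCancel(context.Background())
defer cancel() // returning from the parser cancels the reader's context
file, err := cm.Reader(ctx, filePath)
if err != nil {
	return err
}
defer file.Close()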

View File

@ -36,63 +36,63 @@ func (mc *MockChunkManager) RootPath() string {
return TempFilesPath return TempFilesPath
} }
func (mc *MockChunkManager) Path(filePath string) (string, error) { func (mc *MockChunkManager) Path(ctx context.Context, filePath string) (string, error) {
return "", nil return "", nil
} }
func (mc *MockChunkManager) Reader(filePath string) (storage.FileReader, error) { func (mc *MockChunkManager) Reader(ctx context.Context, filePath string) (storage.FileReader, error) {
return nil, nil return nil, nil
} }
func (mc *MockChunkManager) Write(filePath string, content []byte) error { func (mc *MockChunkManager) Write(ctx context.Context, filePath string, content []byte) error {
return nil return nil
} }
func (mc *MockChunkManager) MultiWrite(contents map[string][]byte) error { func (mc *MockChunkManager) MultiWrite(ctx context.Context, contents map[string][]byte) error {
return nil return nil
} }
func (mc *MockChunkManager) Exist(filePath string) (bool, error) { func (mc *MockChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {
return true, nil return true, nil
} }
func (mc *MockChunkManager) Read(filePath string) ([]byte, error) { func (mc *MockChunkManager) Read(ctx context.Context, filePath string) ([]byte, error) {
return nil, nil return nil, nil
} }
func (mc *MockChunkManager) MultiRead(filePaths []string) ([][]byte, error) { func (mc *MockChunkManager) MultiRead(ctx context.Context, filePaths []string) ([][]byte, error) {
return nil, nil return nil, nil
} }
func (mc *MockChunkManager) ListWithPrefix(prefix string, recursive bool) ([]string, []time.Time, error) { func (mc *MockChunkManager) ListWithPrefix(ctx context.Context, prefix string, recursive bool) ([]string, []time.Time, error) {
return nil, nil, nil return nil, nil, nil
} }
func (mc *MockChunkManager) ReadWithPrefix(prefix string) ([]string, [][]byte, error) { func (mc *MockChunkManager) ReadWithPrefix(ctx context.Context, prefix string) ([]string, [][]byte, error) {
return nil, nil, nil return nil, nil, nil
} }
func (mc *MockChunkManager) ReadAt(filePath string, off int64, length int64) ([]byte, error) { func (mc *MockChunkManager) ReadAt(ctx context.Context, filePath string, off int64, length int64) ([]byte, error) {
return nil, nil return nil, nil
} }
func (mc *MockChunkManager) Mmap(filePath string) (*mmap.ReaderAt, error) { func (mc *MockChunkManager) Mmap(ctx context.Context, filePath string) (*mmap.ReaderAt, error) {
return nil, nil return nil, nil
} }
func (mc *MockChunkManager) Size(filePath string) (int64, error) { func (mc *MockChunkManager) Size(ctx context.Context, filePath string) (int64, error) {
return mc.size, nil return mc.size, nil
} }
func (mc *MockChunkManager) Remove(filePath string) error { func (mc *MockChunkManager) Remove(ctx context.Context, filePath string) error {
return nil return nil
} }
func (mc *MockChunkManager) MultiRemove(filePaths []string) error { func (mc *MockChunkManager) MultiRemove(ctx context.Context, filePaths []string) error {
return nil return nil
} }
func (mc *MockChunkManager) RemoveWithPrefix(prefix string) error { func (mc *MockChunkManager) RemoveWithPrefix(ctx context.Context, prefix string) error {
return nil return nil
} }
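
This mock only cares about Size; every other method is a no-op carrying the widened signature. A minimal usage sketch:

mc := &MockChunkManager{size: 0}
size, _ := mc.Size(context.Background(), "any/path") // the mock ignores both arguments
_ = size // 0 here, which the import path rejects as an empty file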
@ -146,9 +146,9 @@ func Test_ImportRowBased(t *testing.T) {
}`) }`)
filePath := TempFilesPath + "rows_1.json" filePath := TempFilesPath + "rows_1.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
rowCount := 0 rowCount := 0
flushFunc := func(fields map[storage.FieldID]storage.FieldData, shardNum int) error { flushFunc := func(fields map[storage.FieldID]storage.FieldData, shardNum int) error {
@ -196,7 +196,7 @@ func Test_ImportRowBased(t *testing.T) {
}`) }`)
filePath = TempFilesPath + "rows_2.json" filePath = TempFilesPath + "rows_2.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
importResult.State = commonpb.ImportState_ImportStarted importResult.State = commonpb.ImportState_ImportStarted
@ -219,7 +219,7 @@ func Test_ImportColumnBased_json(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -249,7 +249,7 @@ func Test_ImportColumnBased_json(t *testing.T) {
}`) }`)
filePath := TempFilesPath + "columns_1.json" filePath := TempFilesPath + "columns_1.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
rowCount := 0 rowCount := 0
@ -296,7 +296,7 @@ func Test_ImportColumnBased_json(t *testing.T) {
}`) }`)
filePath = TempFilesPath + "rows_2.json" filePath = TempFilesPath + "rows_2.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
importResult.State = commonpb.ImportState_ImportStarted importResult.State = commonpb.ImportState_ImportStarted
@ -319,7 +319,7 @@ func Test_ImportColumnBased_StringKey(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -339,7 +339,7 @@ func Test_ImportColumnBased_StringKey(t *testing.T) {
}`) }`)
filePath := TempFilesPath + "columns_2.json" filePath := TempFilesPath + "columns_2.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
rowCount := 0 rowCount := 0
@ -386,7 +386,7 @@ func Test_ImportColumnBased_numpy(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -404,7 +404,7 @@ func Test_ImportColumnBased_numpy(t *testing.T) {
files := make([]string, 0) files := make([]string, 0)
filePath := TempFilesPath + "scalar_fields.json" filePath := TempFilesPath + "scalar_fields.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
files = append(files, filePath) files = append(files, filePath)
@ -413,7 +413,7 @@ func Test_ImportColumnBased_numpy(t *testing.T) {
content, err = CreateNumpyData(bin) content, err = CreateNumpyData(bin)
assert.Nil(t, err) assert.Nil(t, err)
log.Debug("content", zap.Any("c", content)) log.Debug("content", zap.Any("c", content))
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
files = append(files, filePath) files = append(files, filePath)
@ -422,7 +422,7 @@ func Test_ImportColumnBased_numpy(t *testing.T) {
content, err = CreateNumpyData(flo) content, err = CreateNumpyData(flo)
assert.Nil(t, err) assert.Nil(t, err)
log.Debug("content", zap.Any("c", content)) log.Debug("content", zap.Any("c", content))
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
files = append(files, filePath) files = append(files, filePath)
@ -471,7 +471,7 @@ func Test_ImportColumnBased_numpy(t *testing.T) {
}`) }`)
filePath = TempFilesPath + "rows_2.json" filePath = TempFilesPath + "rows_2.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
importResult.State = commonpb.ImportState_ImportStarted importResult.State = commonpb.ImportState_ImportStarted
@ -524,7 +524,7 @@ func Test_ImportRowBased_perf(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -573,7 +573,7 @@ func Test_ImportRowBased_perf(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
err = bw.Flush() err = bw.Flush()
assert.NoError(t, err) assert.NoError(t, err)
err = cm.Write(filePath, b.Bytes()) err = cm.Write(ctx, filePath, b.Bytes())
assert.NoError(t, err) assert.NoError(t, err)
}() }()
tr.Record("generate large json file " + filePath) tr.Record("generate large json file " + filePath)
@ -625,7 +625,7 @@ func Test_ImportColumnBased_perf(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -675,7 +675,7 @@ func Test_ImportColumnBased_perf(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
err = bw.Flush() err = bw.Flush()
assert.NoError(t, err) assert.NoError(t, err)
err = cm.Write(filePath, b.Bytes()) err = cm.Write(ctx, filePath, b.Bytes())
assert.NoError(t, err) assert.NoError(t, err)
return nil return nil
} }
@ -824,9 +824,9 @@ func Test_ReportImportFailRowBased(t *testing.T) {
}`) }`)
filePath := TempFilesPath + "rows_1.json" filePath := TempFilesPath + "rows_1.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
rowCount := 0 rowCount := 0
flushFunc := func(fields map[storage.FieldID]storage.FieldData, shardNum int) error { flushFunc := func(fields map[storage.FieldID]storage.FieldData, shardNum int) error {
@ -876,7 +876,7 @@ func Test_ReportImportFailColumnBased_json(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -906,7 +906,7 @@ func Test_ReportImportFailColumnBased_json(t *testing.T) {
}`) }`)
filePath := TempFilesPath + "columns_1.json" filePath := TempFilesPath + "columns_1.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
rowCount := 0 rowCount := 0
@ -957,7 +957,7 @@ func Test_ReportImportFailColumnBased_numpy(t *testing.T) {
ctx := context.Background() ctx := context.Background()
cm, err := f.NewPersistentStorageChunkManager(ctx) cm, err := f.NewPersistentStorageChunkManager(ctx)
assert.NoError(t, err) assert.NoError(t, err)
defer cm.RemoveWithPrefix("") defer cm.RemoveWithPrefix(ctx, "")
idAllocator := newIDAllocator(ctx, t) idAllocator := newIDAllocator(ctx, t)
@ -975,7 +975,7 @@ func Test_ReportImportFailColumnBased_numpy(t *testing.T) {
files := make([]string, 0) files := make([]string, 0)
filePath := TempFilesPath + "scalar_fields.json" filePath := TempFilesPath + "scalar_fields.json"
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
files = append(files, filePath) files = append(files, filePath)
@ -984,7 +984,7 @@ func Test_ReportImportFailColumnBased_numpy(t *testing.T) {
content, err = CreateNumpyData(bin) content, err = CreateNumpyData(bin)
assert.Nil(t, err) assert.Nil(t, err)
log.Debug("content", zap.Any("c", content)) log.Debug("content", zap.Any("c", content))
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
files = append(files, filePath) files = append(files, filePath)
@ -993,7 +993,7 @@ func Test_ReportImportFailColumnBased_numpy(t *testing.T) {
content, err = CreateNumpyData(flo) content, err = CreateNumpyData(flo)
assert.Nil(t, err) assert.Nil(t, err)
log.Debug("content", zap.Any("c", content)) log.Debug("content", zap.Any("c", content))
err = cm.Write(filePath, content) err = cm.Write(ctx, filePath, content)
assert.NoError(t, err) assert.NoError(t, err)
files = append(files, filePath) files = append(files, filePath)