Upgrade knowhere to 1.3.4 (#20914)
Signed-off-by: Li Liu <li.liu@zilliz.com>
This commit is contained in:
parent 40abb13413
commit aecf2e4f4b
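Summary (reconstructed from the diff below): the pinned Knowhere release moves from v1.3.3 to v1.3.4, and segcore's thread management moves onto Knowhere's own global thread pool. A new C++ helper, milvus::config::KnowhereInitThreadPool, is exposed through the segcore C API as SegcoreSetThreadPoolNum and invoked from QueryNode during segcore initialization. Because Knowhere now bounds its own parallelism, the Go-side cgo worker pools become unnecessary: the cgoPool fields on IndexNode, QueryNode, metaReplica, Segment, and segmentLoader are removed, the runtime.LockOSThread pinning ritual is deleted, and cgo calls such as C.Search, C.Retrieve, and C.Insert are issued directly.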
@@ -21,6 +21,7 @@
 #include "easyloggingpp/easylogging++.h"
 #include "log/Log.h"
 #include "knowhere/archive/KnowhereConfig.h"
+#include "knowhere/common/ThreadPool.h"

 namespace milvus::config {

@@ -73,4 +74,9 @@ KnowhereSetSimdType(const char* value) {
 }
 }

+void
+KnowhereInitThreadPool(const uint32_t num_threads) {
+    knowhere::ThreadPool::InitGlobalThreadPool(num_threads);
+}
+
 }  // namespace milvus::config

@@ -25,4 +25,7 @@ KnowhereInitImpl(const char*);
 std::string
 KnowhereSetSimdType(const char*);

+void
+KnowhereInitThreadPool(const uint32_t);
+
 }  // namespace milvus::config

@@ -39,6 +39,11 @@ SegcoreSetNprobe(const int64_t value) {
     config.set_nprobe(value);
 }

+extern "C" void
+SegcoreSetThreadPoolNum(const uint32_t num_threads) {
+    milvus::config::KnowhereInitThreadPool(num_threads);
+}
+
 // return value must be freed by the caller
 extern "C" char*
 SegcoreSetSimdType(const char* value) {

@@ -31,6 +31,9 @@ SegcoreSetNprobe(const int64_t);
 char*
 SegcoreSetSimdType(const char*);

+void
+SegcoreSetThreadPoolNum(const uint32_t num_threads);
+
 #ifdef __cplusplus
 }
 #endif
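The five hunks above add the thread-pool plumbing end to end: knowhere::ThreadPool::InitGlobalThreadPool is wrapped by milvus::config::KnowhereInitThreadPool, which is then exported with C linkage as SegcoreSetThreadPoolNum. A minimal cgo sketch of how a Go caller reaches that C symbol follows; the preamble declaration mirrors the header hunk, but the package layout and the assumption that the segcore library is already on the link path are illustrative only (the real call site is QueryNode.InitSegcore, shown further down).

package segcore

/*
#include <stdint.h>
// Declaration mirrors the C API added above; linking against the
// built segcore library is assumed and not shown here.
void SegcoreSetThreadPoolNum(const uint32_t);
*/
import "C"

import "runtime"

// initThreadPool sizes Knowhere's global thread pool to the number of
// CPUs the Go scheduler may use, matching what InitSegcore now does.
func initThreadPool() {
	cpuNum := runtime.GOMAXPROCS(0) // query only, does not change the setting
	C.SegcoreSetThreadPoolNum(C.uint32_t(cpuNum))
}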
@@ -11,8 +11,8 @@
 # or implied. See the License for the specific language governing permissions and limitations under the License.
 #-------------------------------------------------------------------------------

-set( KNOWHERE_VERSION v1.3.3 )
-set( KNOWHERE_SOURCE_MD5 "241bd99371b0a9a45cab0451cc0380d4")
+set( KNOWHERE_VERSION v1.3.4 )
+set( KNOWHERE_SOURCE_MD5 "1a6dcbf87b74940c95dd0d3b3f04f541")

 if ( DEFINED ENV{MILVUS_KNOWHERE_URL} )
     set( KNOWHERE_SOURCE_URL "$ENV{MILVUS_KNOWHERE_URL}" )
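The version and MD5 pins work as a pair: the build downloads the Knowhere source archive (from $ENV{MILVUS_KNOWHERE_URL} when set) and rejects it unless its MD5 matches the pinned digest, so a version bump always touches both lines. A rough Go sketch of the same integrity check; the file name is hypothetical and only the digest comes from the hunk above.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// verifyMD5 mimics the build-time archive check: hash the downloaded
// tarball and compare against the pinned digest.
func verifyMD5(path, want string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != want {
		return fmt.Errorf("md5 mismatch: got %s, want %s", got, want)
	}
	return nil
}

func main() {
	// "knowhere-v1.3.4.tar.gz" is an illustrative local file name.
	if err := verifyMD5("knowhere-v1.3.4.tar.gz", "1a6dcbf87b74940c95dd0d3b3f04f541"); err != nil {
		fmt.Println(err)
	}
}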
@@ -29,18 +29,15 @@ import (
 	"context"
 	"errors"
 	"io"
-	"math"
 	"math/rand"
 	"os"
 	"path"
-	"runtime"
 	"sync"
 	"sync/atomic"
 	"syscall"
 	"time"
 	"unsafe"

-	"github.com/panjf2000/ants/v2"
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"go.uber.org/zap"

@@ -50,7 +47,6 @@ import (
 	"github.com/milvus-io/milvus/internal/log"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/types"
-	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/dependency"
 	"github.com/milvus-io/milvus/internal/util/hardware"
 	"github.com/milvus-io/milvus/internal/util/initcore"

@@ -101,8 +97,6 @@ type IndexNode struct {
 	initOnce  sync.Once
 	stateLock sync.Mutex
 	tasks     map[taskKey]*taskInfo
-
-	cgoPool *concurrency.Pool
 }

 // NewIndexNode creates a new IndexNode component.
@@ -203,27 +197,6 @@ func (i *IndexNode) Init() error {
 		i.closer = trace.InitTracing("index_node")

 		i.initKnowhere()
-
-		// IndexNode will not execute tasks concurrently, so the size of goroutines pool is 1.
-		i.cgoPool, err = concurrency.NewPool(1, ants.WithPreAlloc(true),
-			ants.WithExpiryDuration(math.MaxInt64))
-		if err != nil {
-			log.Error("IndexNode init cgo pool failed", zap.Error(err))
-			initErr = err
-			return
-		}
-
-		sig := make(chan struct{})
-		wg := sync.WaitGroup{}
-		wg.Add(1)
-		i.cgoPool.Submit(func() (interface{}, error) {
-			runtime.LockOSThread()
-			wg.Done()
-			<-sig
-			return nil, nil
-		})
-		wg.Wait()
-		close(sig)
 	})

 	log.Info("Init IndexNode finished", zap.Error(initErr))
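For context, the block deleted above implemented a common cgo idiom: pre-fill a worker pool with goroutines that each call runtime.LockOSThread and then park, so every later Submit runs on a small fixed set of OS threads and OpenMP inside Knowhere cannot fan out further (IndexNode used a pool of one; QueryNode, below, used one per CPU). A self-contained sketch of that idiom, using plain goroutines instead of the ants-backed concurrency.Pool:

package main

import (
	"runtime"
	"sync"
)

// pinWorkers starts n goroutines, pins each to its own OS thread, and
// blocks until all are pinned. Closing the returned channel releases
// the workers. This distills the shape of the block deleted above.
func pinWorkers(n int) chan struct{} {
	sig := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			runtime.LockOSThread() // keep this goroutine on one OS thread for cgo
			wg.Done()
			<-sig // park until released
		}()
	}
	wg.Wait()
	return sig
}

With Knowhere 1.3.4 sizing and owning its thread pool, this ceremony can simply be deleted.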
@@ -338,7 +311,7 @@ func (i *IndexNode) GetNodeID() int64 {
 	return paramtable.GetNodeID()
 }

-//ShowConfigurations returns the configurations of indexNode matching req.Pattern
+// ShowConfigurations returns the configurations of indexNode matching req.Pattern
 func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
 	if !i.isHealthy() {
 		log.Warn("IndexNode.ShowConfigurations failed",

@@ -94,7 +94,6 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
 		nodeID:         i.GetNodeID(),
 		tr:             timerecord.NewTimeRecorder(fmt.Sprintf("IndexBuildID: %d, ClusterID: %s", req.BuildID, req.ClusterID)),
 		serializedSize: 0,
-		pool:           i.cgoPool,
 	}
 	ret := &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_Success,

@@ -34,7 +34,6 @@ import (
 	"github.com/milvus-io/milvus/internal/metrics"
 	"github.com/milvus-io/milvus/internal/proto/indexpb"
 	"github.com/milvus-io/milvus/internal/storage"
-	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/funcutil"
 	"github.com/milvus-io/milvus/internal/util/indexcgowrapper"
 	"github.com/milvus-io/milvus/internal/util/indexparamcheck"

@@ -101,7 +100,6 @@ type indexBuildTask struct {
 	tr        *timerecord.TimeRecorder
 	statistic indexpb.JobInfo
 	node      *IndexNode
-	pool      *concurrency.Pool
 }

 func (it *indexBuildTask) Reset() {
@@ -249,18 +247,11 @@ func (it *indexBuildTask) BuildIndex(ctx context.Context) error {
 	dType := dataset.DType
 	var err error
 	if dType != schemapb.DataType_None {
-		_, err = it.pool.Submit(func() (interface{}, error) {
-			it.index, err = indexcgowrapper.NewCgoIndex(dType, it.newTypeParams, it.newIndexParams, it.req.GetStorageConfig())
-			if err != nil {
-				return nil, err
-			}
+		it.index, err = indexcgowrapper.NewCgoIndex(dType, it.newTypeParams, it.newIndexParams, it.req.GetStorageConfig())
+		if err == nil {

 			err = it.index.Build(dataset)
-			if err != nil {
-				return nil, err
-			}
-			return nil, nil
-		}).Await()
+		}
 		if err != nil {
 			log.Ctx(ctx).Error("failed to build index", zap.Error(err))
 			return err

@@ -365,19 +356,13 @@ func (it *indexBuildTask) BuildDiskAnnIndex(ctx context.Context) error {
 		zap.Int64("buildID", it.BuildID),
 		zap.String("index params", string(jsonIndexParams)))

-	_, err = it.pool.Submit(func() (interface{}, error) {
-		it.index, err = indexcgowrapper.NewCgoIndex(dType, it.newTypeParams, it.newIndexParams, it.req.GetStorageConfig())
-		if err != nil {
-			log.Ctx(ctx).Error("failed to create index", zap.Error(err))
-			return nil, err
-		}
+	it.index, err = indexcgowrapper.NewCgoIndex(dType, it.newTypeParams, it.newIndexParams, it.req.GetStorageConfig())
+	if err != nil {
+		log.Ctx(ctx).Error("failed to create index", zap.Error(err))
+	} else {

 		err = it.index.Build(dataset)
-		if err != nil {
-			return nil, err
-		}
-		return nil, nil
-	}).Await()
+	}
 	if err != nil {
 		if it.index != nil && it.index.CleanLocalData() != nil {
 			log.Ctx(ctx).Error("failed to clean cached data on disk after build index failed",
@@ -36,7 +36,6 @@ import (
 	"github.com/milvus-io/milvus/internal/proto/datapb"
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
-	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/paramtable"
 	"github.com/milvus-io/milvus/internal/util/typeutil"
 	"github.com/samber/lo"

@@ -155,8 +154,6 @@ type metaReplica struct {

 	// segmentsBlackList stores segments which are still loading
 	segmentsBlackList typeutil.UniqueSet
-
-	cgoPool *concurrency.Pool
 }

 // getSegmentsMemSize get the memory size in bytes of all the Segments

@@ -577,7 +574,7 @@ func (replica *metaReplica) addSegment(segmentID UniqueID, partitionID UniqueID,
 	collection.mu.Lock()
 	defer collection.mu.Unlock()

-	seg, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, version, seekPosition, replica.cgoPool)
+	seg, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segType, version, seekPosition)
 	if err != nil {
 		return err
 	}

@@ -922,7 +919,7 @@ func (replica *metaReplica) removeCollectionVDeltaChannel(collectionID UniqueID,
 }

 // newCollectionReplica returns a new ReplicaInterface
-func newCollectionReplica(pool *concurrency.Pool) ReplicaInterface {
+func newCollectionReplica() ReplicaInterface {
 	var replica ReplicaInterface = &metaReplica{
 		collections: make(map[UniqueID]*Collection),
 		partitions:  make(map[UniqueID]*Partition),

@@ -932,8 +929,6 @@ func newCollectionReplica(pool *concurrency.Pool) ReplicaInterface {
 		excludedSegments: make(map[UniqueID][]*datapb.SegmentInfo),

 		segmentsBlackList: make(typeutil.UniqueSet),
-
-		cgoPool: pool,
 	}

 	return replica
@@ -17,7 +17,6 @@
 package querynode

 import (
-	"runtime"
 	"testing"

 	"github.com/stretchr/testify/assert"

@@ -25,7 +24,6 @@ import (

 	"github.com/milvus-io/milvus-proto/go-api/commonpb"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
-	"github.com/milvus-io/milvus/internal/util/concurrency"
 )

 func TestMetaReplica_collection(t *testing.T) {

@@ -228,9 +226,6 @@ func TestMetaReplica_segment(t *testing.T) {
 	assert.NoError(t, err)
 	defer replica.freeAll()

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	schema := genTestCollectionSchema()
 	collection := replica.addCollection(defaultCollectionID, schema)
 	replica.addPartition(defaultCollectionID, defaultPartitionID)

@@ -250,12 +245,12 @@ func TestMetaReplica_segment(t *testing.T) {
 		},
 	}

-	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)
 	err = replica.setSegment(segment1)
 	assert.NoError(t, err)

-	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)
 	segment2.setIndexedFieldInfo(fieldID, indexInfo)
 	err = replica.setSegment(segment2)

@@ -277,30 +272,27 @@ func TestMetaReplica_segment(t *testing.T) {
 	assert.NoError(t, err)
 	defer replica.freeAll()

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	schema := genTestCollectionSchema()
 	collection := replica.addCollection(defaultCollectionID, schema)
 	replica.addPartition(defaultCollectionID, defaultPartitionID)
 	replica.addPartition(defaultCollectionID, defaultPartitionID+1)

-	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)
 	err = replica.setSegment(segment1)
 	assert.NoError(t, err)

-	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)
 	err = replica.setSegment(segment2)
 	assert.NoError(t, err)

-	segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID+1, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)
 	err = replica.setSegment(segment3)
 	assert.NoError(t, err)

-	segment4, err := newSegment(collection, UniqueID(4), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment4, err := newSegment(collection, UniqueID(4), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)
 	err = replica.setSegment(segment4)
 	assert.NoError(t, err)

@@ -352,16 +344,13 @@ func TestMetaReplica_BlackList(t *testing.T) {
 	replica.addPartition(defaultCollectionID, defaultPartitionID)
 	replica.addPartition(defaultCollectionID, defaultPartitionID+1)

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
-	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment1, err := newSegment(collection, UniqueID(1), defaultPartitionID, defaultCollectionID, "channel1", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)

-	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment2, err := newSegment(collection, UniqueID(2), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeSealed, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)

-	segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment3, err := newSegment(collection, UniqueID(3), defaultPartitionID, defaultCollectionID, "channel2", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.NoError(t, err)

 	replica.addSegmentsLoadingList([]UniqueID{1, 2, 3})
@@ -25,7 +25,6 @@ import (
 	"math/rand"
 	"path"
 	"path/filepath"
-	"runtime"
 	"strconv"

 	"github.com/golang/protobuf/proto"

@@ -1242,10 +1241,6 @@ func genSealedSegment(schema *schemapb.CollectionSchema,
 	vChannel Channel,
 	msgLength int) (*Segment, error) {
 	col := newCollection(collectionID, schema)
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	if err != nil {
-		return nil, err
-	}

 	seg, err := newSegment(col,
 		segmentID,

@@ -1254,8 +1249,7 @@ func genSealedSegment(schema *schemapb.CollectionSchema,
 		vChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	if err != nil {
 		return nil, err
 	}

@@ -1292,28 +1286,20 @@ func genSimpleSealedSegment(msgLength int) (*Segment, error) {
 }

 func genSimpleReplica() (ReplicaInterface, error) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	if err != nil {
-		return nil, err
-	}
-	r := newCollectionReplica(pool)
+	r := newCollectionReplica()
 	schema := genTestCollectionSchema()
 	r.addCollection(defaultCollectionID, schema)
-	err = r.addPartition(defaultCollectionID, defaultPartitionID)
+	err := r.addPartition(defaultCollectionID, defaultPartitionID)
 	return r, err
 }

 func genSimpleSegmentLoaderWithMqFactory(metaReplica ReplicaInterface, factory msgstream.Factory) (*segmentLoader, error) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(1))
-	if err != nil {
-		return nil, err
-	}
 	kv, err := genEtcdKV()
 	if err != nil {
 		return nil, err
 	}
 	cm := storage.NewLocalChunkManager(storage.RootPath(defaultLocalStorage))
-	return newSegmentLoader(metaReplica, kv, cm, factory, pool), nil
+	return newSegmentLoader(metaReplica, kv, cm, factory), nil
 }

 func genSimpleReplicaWithSealSegment(ctx context.Context) (ReplicaInterface, error) {
@@ -30,7 +30,6 @@ import "C"
 import (
 	"context"
 	"fmt"
-	"math"
 	"os"
 	"path"
 	"runtime"

@@ -77,9 +76,10 @@ var rateCol *rateCollector
 // services in querynode package.
 //
 // QueryNode implements `types.Component`, `types.QueryNode` interfaces.
+//
 // `rootCoord` is a grpc client of root coordinator.
 // `indexCoord` is a grpc client of index coordinator.
 // `stateCode` is current statement of this query node, indicating whether it's healthy.
 type QueryNode struct {
 	queryNodeLoopCtx    context.Context
 	queryNodeLoopCancel context.CancelFunc

@@ -121,8 +121,6 @@ type QueryNode struct {
 	//shard query service, handles shard-level query & search
 	queryShardService *queryShardService

-	// cgoPool is the worker pool to control concurrency of cgo call
-	cgoPool *concurrency.Pool
 	// pool for load/release channel
 	taskPool *concurrency.Pool
 }

@@ -194,6 +192,9 @@ func (node *QueryNode) InitSegcore() {
 	C.SegcoreInit(cEasyloggingYaml)
 	C.free(unsafe.Pointer(cEasyloggingYaml))

+	cpuNum := runtime.GOMAXPROCS(0)
+	C.SegcoreSetThreadPoolNum(C.uint32_t(cpuNum))
+
 	// override segcore chunk size
 	cChunkRows := C.int64_t(Params.QueryNodeCfg.ChunkRows)
 	C.SegcoreSetChunkRows(cChunkRows)
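One idiom worth noting in the added lines: runtime.GOMAXPROCS with a non-positive argument only reports the current setting without changing it, so the segcore thread pool is sized to the number of CPUs the Go scheduler may actually use. A one-line check:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// An argument < 1 only queries; the setting is left untouched.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}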
@@ -256,13 +257,6 @@ func (node *QueryNode) Init() error {
 		log.Info("queryNode try to connect etcd success", zap.Any("MetaRootPath", Params.EtcdCfg.MetaRootPath))

 		cpuNum := runtime.GOMAXPROCS(0)
-		node.cgoPool, err = concurrency.NewPool(cpuNum, ants.WithPreAlloc(true),
-			ants.WithExpiryDuration(math.MaxInt64))
-		if err != nil {
-			log.Error("QueryNode init cgo pool failed", zap.Error(err))
-			initError = err
-			return
-		}

 		node.taskPool, err = concurrency.NewPool(cpuNum, ants.WithPreAlloc(true))
 		if err != nil {

@@ -271,30 +265,13 @@
 			return
 		}

-		// ensure every cgopool go routine is locked with a OS thread
-		// so openmp in knowhere won't create too much request
-		sig := make(chan struct{})
-		wg := sync.WaitGroup{}
-		wg.Add(cpuNum)
-		for i := 0; i < cpuNum; i++ {
-			node.cgoPool.Submit(func() (interface{}, error) {
-				runtime.LockOSThread()
-				wg.Done()
-				<-sig
-				return nil, nil
-			})
-		}
-		wg.Wait()
-		close(sig)
-
-		node.metaReplica = newCollectionReplica(node.cgoPool)
+		node.metaReplica = newCollectionReplica()

 		node.loader = newSegmentLoader(
 			node.metaReplica,
 			node.etcdKV,
 			node.vectorStorage,
-			node.factory,
-			node.cgoPool)
+			node.factory)

 		node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.metaReplica, node.tSafeReplica, node.factory)

@@ -21,7 +21,6 @@ import (
 	"io/ioutil"
 	"net/url"
 	"os"
-	"runtime"
 	"testing"
 	"time"

@@ -29,7 +28,6 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.etcd.io/etcd/server/v3/embed"

-	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/dependency"
 	"github.com/milvus-io/milvus/internal/util/paramtable"

@@ -101,19 +99,14 @@ func newQueryNodeMock() *QueryNode {
 	svr := NewQueryNode(ctx, factory)
 	tsReplica := newTSafeReplica()

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	if err != nil {
-		panic(err)
-	}
-
-	replica := newCollectionReplica(pool)
+	replica := newCollectionReplica()
 	svr.metaReplica = replica
 	svr.dataSyncService = newDataSyncService(ctx, svr.metaReplica, tsReplica, factory)
 	svr.vectorStorage, err = factory.NewPersistentStorageChunkManager(ctx)
 	if err != nil {
 		panic(err)
 	}
-	svr.loader = newSegmentLoader(svr.metaReplica, etcdKV, svr.vectorStorage, factory, pool)
+	svr.loader = newSegmentLoader(svr.metaReplica, etcdKV, svr.vectorStorage, factory)
 	svr.etcdKV = etcdKV

 	return svr
@@ -35,7 +35,6 @@ import (
 	"unsafe"

 	"github.com/milvus-io/milvus/internal/proto/internalpb"
-	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/funcutil"
 	"github.com/milvus-io/milvus/internal/util/paramtable"
 	"github.com/milvus-io/milvus/internal/util/typeutil"

@@ -102,8 +101,6 @@ type Segment struct {
 	// only used by sealed segments
 	currentStat  *storage.PkStatistics
 	historyStats []*storage.PkStatistics
-
-	pool *concurrency.Pool
 }

 // ID returns the identity number.

@@ -175,8 +172,7 @@ func newSegment(collection *Collection,
 	vChannelID Channel,
 	segType segmentType,
 	version UniqueID,
-	startPosition *internalpb.MsgPosition,
-	pool *concurrency.Pool) (*Segment, error) {
+	startPosition *internalpb.MsgPosition) (*Segment, error) {
 	/*
 		CSegmentInterface
 		NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);

@@ -184,15 +180,9 @@ func newSegment(collection *Collection,
 	var segmentPtr C.CSegmentInterface
 	switch segType {
 	case segmentTypeSealed:
-		pool.Submit(func() (interface{}, error) {
-			segmentPtr = C.NewSegment(collection.collectionPtr, C.Sealed, C.int64_t(segmentID))
-			return nil, nil
-		}).Await()
+		segmentPtr = C.NewSegment(collection.collectionPtr, C.Sealed, C.int64_t(segmentID))
 	case segmentTypeGrowing:
-		pool.Submit(func() (interface{}, error) {
-			segmentPtr = C.NewSegment(collection.collectionPtr, C.Growing, C.int64_t(segmentID))
-			return nil, nil
-		}).Await()
+		segmentPtr = C.NewSegment(collection.collectionPtr, C.Growing, C.int64_t(segmentID))
 	default:
 		err := fmt.Errorf("illegal segment type %d when create segment %d", segType, segmentID)
 		log.Warn("create new segment error",

@@ -223,7 +213,6 @@ func newSegment(collection *Collection,
 		recentlyModified: atomic.NewBool(false),
 		destroyed:        atomic.NewBool(false),
 		historyStats:     []*storage.PkStatistics{},
-		pool:             pool,
 	}

 	return segment, nil

@@ -246,10 +235,7 @@ func deleteSegment(segment *Segment) {
 		return
 	}

-	segment.pool.Submit(func() (interface{}, error) {
-		C.DeleteSegment(cPtr)
-		return nil, nil
-	}).Await()
+	C.DeleteSegment(cPtr)

 	segment.currentStat = nil
 	segment.historyStats = nil

@@ -271,11 +257,8 @@ func (s *Segment) getRealCount() int64 {
 	if !s.healthy() {
 		return -1
 	}
-	var rowCount C.int64_t
-	s.pool.Submit(func() (interface{}, error) {
-		rowCount = C.GetRealCount(s.segmentPtr)
-		return nil, nil
-	}).Await()
+
+	rowCount := C.GetRealCount(s.segmentPtr)

 	return int64(rowCount)
 }
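The rewrite in getRealCount is the template for the rest of this file: a cgo call that used to be funneled through s.pool.Submit(...).Await() becomes a plain call. The sketch below imitates that future-style pool with plain channels to make the before/after shape concrete; submit and future here are illustrative stand-ins, not the real concurrency.Pool API.

package main

import "fmt"

// future is a minimal stand-in for the handle whose Await() the old
// code blocked on; both names are illustrative.
type future struct{ done chan struct{} }

func (f *future) Await() { <-f.done }

// submit runs fn on another goroutine and returns an awaitable handle,
// mirroring the shape of pool.Submit(...).Await() in the deleted code.
func submit(fn func()) *future {
	f := &future{done: make(chan struct{})}
	go func() {
		defer close(f.done)
		fn()
	}()
	return f
}

func main() {
	// Before this commit: route the cgo call through the shared pool so it
	// runs on one of the pinned OS threads.
	var rowCount int64
	submit(func() { rowCount = 42 /* e.g. C.GetRowCount(ptr) */ }).Await()

	// After this commit: call directly; Knowhere throttles internally.
	rowCount = 42
	fmt.Println(rowCount)
}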
@@ -290,11 +273,8 @@ func (s *Segment) getRowCount() int64 {
 	if !s.healthy() {
 		return -1
 	}
-	var rowCount C.int64_t
-	s.pool.Submit(func() (interface{}, error) {
-		rowCount = C.GetRowCount(s.segmentPtr)
-		return nil, nil
-	}).Await()
+
+	rowCount := C.GetRowCount(s.segmentPtr)

 	return int64(rowCount)
 }

@@ -310,11 +290,7 @@ func (s *Segment) getDeletedCount() int64 {
 		return -1
 	}

-	var deletedCount C.int64_t
-	s.pool.Submit(func() (interface{}, error) {
-		deletedCount = C.GetRowCount(s.segmentPtr)
-		return nil, nil
-	}).Await()
+	deletedCount := C.GetRowCount(s.segmentPtr)

 	return int64(deletedCount)
 }

@@ -329,11 +305,7 @@ func (s *Segment) getMemSize() int64 {
 	if !s.healthy() {
 		return -1
 	}
-	var memoryUsageInBytes C.int64_t
-	s.pool.Submit(func() (interface{}, error) {
-		memoryUsageInBytes = C.GetMemoryUsageInBytes(s.segmentPtr)
-		return nil, nil
-	}).Await()
+	memoryUsageInBytes := C.GetMemoryUsageInBytes(s.segmentPtr)

 	return int64(memoryUsageInBytes)
 }

@@ -366,14 +338,10 @@ func (s *Segment) search(searchReq *searchRequest) (*SearchResult, error) {
 		zap.String("segmentType", s.segmentType.String()),
 		zap.Bool("loadIndex", loadIndex))

-	var status C.CStatus
-	s.pool.Submit(func() (interface{}, error) {
-		tr := timerecord.NewTimeRecorder("cgoSearch")
-		status = C.Search(s.segmentPtr, searchReq.plan.cSearchPlan, searchReq.cPlaceholderGroup,
-			C.uint64_t(searchReq.timestamp), &searchResult.cSearchResult)
-		metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
-		return nil, nil
-	}).Await()
+	tr := timerecord.NewTimeRecorder("cgoSearch")
+	status := C.Search(s.segmentPtr, searchReq.plan.cSearchPlan, searchReq.cPlaceholderGroup,
+		C.uint64_t(searchReq.timestamp), &searchResult.cSearchResult)
+	metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SearchLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
 	if err := HandleCStatus(&status, "Search failed"); err != nil {
 		return nil, err
 	}

@@ -395,18 +363,13 @@ func (s *Segment) retrieve(plan *RetrievePlan) (*segcorepb.RetrieveResults, erro
 	var retrieveResult RetrieveResult
 	ts := C.uint64_t(plan.Timestamp)

-	var status C.CStatus
-	s.pool.Submit(func() (interface{}, error) {
-		tr := timerecord.NewTimeRecorder("cgoRetrieve")
-		status = C.Retrieve(s.segmentPtr, plan.cRetrievePlan, ts, &retrieveResult.cRetrieveResult)
-		metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
-			metrics.QueryLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
-		log.Debug("do retrieve on segment",
-			zap.Int64("msgID", plan.msgID),
-			zap.Int64("segmentID", s.segmentID), zap.String("segmentType", s.segmentType.String()))
-
-		return nil, nil
-	}).Await()
+	tr := timerecord.NewTimeRecorder("cgoRetrieve")
+	status := C.Retrieve(s.segmentPtr, plan.cRetrievePlan, ts, &retrieveResult.cRetrieveResult)
+	metrics.QueryNodeSQSegmentLatencyInCore.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()),
+		metrics.QueryLabel).Observe(float64(tr.ElapseSpan().Milliseconds()))
+	log.Debug("do retrieve on segment",
+		zap.Int64("msgID", plan.msgID),
+		zap.Int64("segmentID", s.segmentID), zap.String("segmentType", s.segmentType.String()))

 	if err := HandleCStatus(&status, "Retrieve failed"); err != nil {
 		return nil, err

@@ -687,12 +650,9 @@ func (s *Segment) segmentPreInsert(numOfRecords int) (int64, error) {
 	}

 	var offset int64
-	var status C.CStatus
 	cOffset := (*C.int64_t)(&offset)
-	s.pool.Submit(func() (interface{}, error) {
-		status = C.PreInsert(s.segmentPtr, C.int64_t(int64(numOfRecords)), cOffset)
-		return nil, nil
-	}).Await()
+	status := C.PreInsert(s.segmentPtr, C.int64_t(int64(numOfRecords)), cOffset)
 	if err := HandleCStatus(&status, "PreInsert failed"); err != nil {
 		return 0, err
 	}

@@ -710,12 +670,7 @@ func (s *Segment) segmentPreDelete(numOfRecords int) int64 {
 		return -1
 	}

-	var offset C.int64_t
-	s.pool.Submit(func() (interface{}, error) {
-		offset = C.PreDelete(s.segmentPtr, C.int64_t(int64(numOfRecords)))
-
-		return nil, nil
-	}).Await()
+	offset := C.PreDelete(s.segmentPtr, C.int64_t(int64(numOfRecords)))

 	return int64(offset)
 }

@@ -742,19 +697,13 @@ func (s *Segment) segmentInsert(offset int64, entityIDs []UniqueID, timestamps [
 	var cEntityIdsPtr = (*C.int64_t)(&(entityIDs)[0])
 	var cTimestampsPtr = (*C.uint64_t)(&(timestamps)[0])

-	var status C.CStatus
-
-	s.pool.Submit(func() (interface{}, error) {
-		status = C.Insert(s.segmentPtr,
-			cOffset,
-			cNumOfRows,
-			cEntityIdsPtr,
-			cTimestampsPtr,
-			(*C.uint8_t)(unsafe.Pointer(&insertRecordBlob[0])),
-			(C.uint64_t)(len(insertRecordBlob)))
-
-		return nil, nil
-	}).Await()
+	status := C.Insert(s.segmentPtr,
+		cOffset,
+		cNumOfRows,
+		cEntityIdsPtr,
+		cTimestampsPtr,
+		(*C.uint8_t)(unsafe.Pointer(&insertRecordBlob[0])),
+		(C.uint64_t)(len(insertRecordBlob)))

 	if err := HandleCStatus(&status, "Insert failed"); err != nil {
 		return err

@@ -828,12 +777,7 @@ func (s *Segment) segmentDelete(offset int64, entityIDs []primaryKey, timestamps
 		return fmt.Errorf("failed to marshal ids: %s", err)
 	}

-	var status C.CStatus
-	s.pool.Submit(func() (interface{}, error) {
-		status = C.Delete(s.segmentPtr, cOffset, cSize, (*C.uint8_t)(unsafe.Pointer(&dataBlob[0])), (C.uint64_t)(len(dataBlob)), cTimestampsPtr)
-
-		return nil, nil
-	}).Await()
+	status := C.Delete(s.segmentPtr, cOffset, cSize, (*C.uint8_t)(unsafe.Pointer(&dataBlob[0])), (C.uint64_t)(len(dataBlob)), cTimestampsPtr)

 	if err := HandleCStatus(&status, "Delete failed"); err != nil {
 		return err

@@ -870,11 +814,7 @@ func (s *Segment) segmentLoadFieldData(fieldID int64, rowCount int64, data *sche
 		row_count: C.int64_t(rowCount),
 	}

-	var status C.CStatus
-	s.pool.Submit(func() (interface{}, error) {
-		status = C.LoadFieldData(s.segmentPtr, loadInfo)
-		return nil, nil
-	}).Await()
+	status := C.LoadFieldData(s.segmentPtr, loadInfo)

 	if err := HandleCStatus(&status, "LoadFieldData failed"); err != nil {
 		return err

@@ -940,11 +880,7 @@ func (s *Segment) segmentLoadDeletedRecord(primaryKeys []primaryKey, timestamps
 	CStatus
 	LoadDeletedRecord(CSegmentInterface c_segment, CLoadDeletedRecordInfo deleted_record_info)
 	*/
-	var status C.CStatus
-	s.pool.Submit(func() (interface{}, error) {
-		status = C.LoadDeletedRecord(s.segmentPtr, loadInfo)
-		return nil, nil
-	}).Await()
+	status := C.LoadDeletedRecord(s.segmentPtr, loadInfo)

 	if err := HandleCStatus(&status, "LoadDeletedRecord failed"); err != nil {
 		return err

@@ -983,11 +919,7 @@ func (s *Segment) segmentLoadIndexData(bytesIndex [][]byte, indexInfo *querypb.F
 		return fmt.Errorf("%w(segmentID=%d)", ErrSegmentUnhealthy, s.segmentID)
 	}

-	var status C.CStatus
-	s.pool.Submit(func() (interface{}, error) {
-		status = C.UpdateSealedSegmentIndex(s.segmentPtr, loadIndexInfo.cLoadIndexInfo)
-		return nil, nil
-	}).Await()
+	status := C.UpdateSealedSegmentIndex(s.segmentPtr, loadIndexInfo.cLoadIndexInfo)

 	if err := HandleCStatus(&status, "UpdateSealedSegmentIndex failed"); err != nil {
 		return err
@@ -74,8 +74,6 @@ type segmentLoader struct {

 	ioPool  *concurrency.Pool
 	cpuPool *concurrency.Pool
-	// cgoPool for all cgo invocation
-	cgoPool *concurrency.Pool

 	factory msgstream.Factory
 }

@@ -154,7 +152,7 @@ func (loader *segmentLoader) LoadSegment(ctx context.Context, req *querypb.LoadS
 		return nil, err
 	}

-	segment, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segmentType, req.GetVersion(), info.StartPosition, loader.cgoPool)
+	segment, err := newSegment(collection, segmentID, partitionID, collectionID, vChannelID, segmentType, req.GetVersion(), info.StartPosition)
 	if err != nil {
 		log.Error("load segment failed when create new segment",
 			zap.Int64("partitionID", partitionID),

@@ -980,8 +978,7 @@ func newSegmentLoader(
 	metaReplica ReplicaInterface,
 	etcdKV *etcdkv.EtcdKV,
 	cm storage.ChunkManager,
-	factory msgstream.Factory,
-	pool *concurrency.Pool) *segmentLoader {
+	factory msgstream.Factory) *segmentLoader {

 	cpuNum := runtime.GOMAXPROCS(0)
 	ioPoolSize := cpuNum * 8

@@ -1019,7 +1016,6 @@ func newSegmentLoader(
 		// init them later
 		ioPool:  ioPool,
 		cpuPool: cpuPool,
-		cgoPool: pool,

 		factory: factory,
 	}

@@ -39,7 +39,6 @@ import (
 	"github.com/milvus-io/milvus/internal/proto/internalpb"
 	"github.com/milvus-io/milvus/internal/proto/querypb"
 	"github.com/milvus-io/milvus/internal/storage"
-	"github.com/milvus-io/milvus/internal/util/concurrency"
 	"github.com/milvus-io/milvus/internal/util/funcutil"
 )

@@ -187,9 +186,6 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
 	loader := node.loader
 	assert.NotNil(t, loader)

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	var fieldPk *schemapb.FieldSchema
 	switch pkType {
 	case schemapb.DataType_Int64:

@@ -239,8 +235,7 @@ func TestSegmentLoader_loadSegmentFieldsData(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema)

@@ -379,9 +374,6 @@ func TestSegmentLoader_invalid(t *testing.T) {
 	loader := node.loader
 	assert.NotNil(t, loader)

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	cm := &mocks.ChunkManager{}
 	cm.EXPECT().Read(mock.Anything, mock.AnythingOfType("string")).Return(nil, errors.New("mocked"))

@@ -405,8 +397,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	binlog, _, err := saveBinLog(ctx, defaultCollectionID, defaultPartitionID, defaultSegmentID, defaultMsgLength, schema)

@@ -424,9 +415,6 @@ func TestSegmentLoader_invalid(t *testing.T) {
 	loader := node.loader
 	assert.NotNil(t, loader)

-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	cm := &mocks.ChunkManager{}
 	cm.EXPECT().Read(mock.Anything, mock.AnythingOfType("string")).Return(nil, errors.New("mocked"))

@@ -450,8 +438,7 @@ func TestSegmentLoader_invalid(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	err = loader.loadFieldIndexData(ctx, segment, &querypb.FieldIndexInfo{

@@ -495,7 +482,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
 	collection, err := node.metaReplica.getCollectionByID(defaultCollectionID)
 	assert.NoError(t, err)

-	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, loader.cgoPool)
+	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	insertData, err := genInsertData(defaultMsgLength, collection.schema)

@@ -526,7 +513,7 @@ func TestSegmentLoader_testLoadGrowing(t *testing.T) {
 	collection, err := node.metaReplica.getCollectionByID(defaultCollectionID)
 	assert.NoError(t, err)

-	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, node.loader.cgoPool)
+	segment, err := newSegment(collection, defaultSegmentID+1, defaultPartitionID, defaultCollectionID, defaultDMLChannel, segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	insertData, err := genInsertData(defaultMsgLength, collection.schema)
@ -20,7 +20,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"runtime"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
@ -35,15 +34,11 @@ import (
|
|||||||
"github.com/milvus-io/milvus/internal/proto/querypb"
|
"github.com/milvus-io/milvus/internal/proto/querypb"
|
||||||
"github.com/milvus-io/milvus/internal/proto/segcorepb"
|
"github.com/milvus-io/milvus/internal/proto/segcorepb"
|
||||||
"github.com/milvus-io/milvus/internal/storage"
|
"github.com/milvus-io/milvus/internal/storage"
|
||||||
"github.com/milvus-io/milvus/internal/util/concurrency"
|
|
||||||
"github.com/milvus-io/milvus/internal/util/funcutil"
|
"github.com/milvus-io/milvus/internal/util/funcutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// -------------------------------------------------------------------------------------- constructor and destructor
|
// -------------------------------------------------------------------------------------- constructor and destructor
|
||||||
func TestSegment_newSegment(t *testing.T) {
|
func TestSegment_newSegment(t *testing.T) {
|
||||||
pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
collectionID := UniqueID(0)
|
collectionID := UniqueID(0)
|
||||||
schema := genTestCollectionSchema()
|
schema := genTestCollectionSchema()
|
||||||
collectionMeta := genCollectionMeta(collectionID, schema)
|
collectionMeta := genCollectionMeta(collectionID, schema)
|
||||||
@ -52,7 +47,7 @@ func TestSegment_newSegment(t *testing.T) {
|
|||||||
assert.Equal(t, collection.ID(), collectionID)
|
assert.Equal(t, collection.ID(), collectionID)
|
||||||
|
|
||||||
segmentID := UniqueID(0)
|
segmentID := UniqueID(0)
|
||||||
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
|
segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
|
||||||
assert.Nil(t, err)
|
assert.Nil(t, err)
|
||||||
assert.Equal(t, segmentID, segment.segmentID)
|
assert.Equal(t, segmentID, segment.segmentID)
|
||||||
deleteSegment(segment)
|
deleteSegment(segment)
|
||||||
@@ -62,15 +57,12 @@ func TestSegment_newSegment(t *testing.T) {
 		_, err = newSegment(collection,
 			defaultSegmentID,
 			defaultPartitionID,
-			collectionID, "", 100, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+			collectionID, "", 100, defaultSegmentVersion, defaultSegmentStartPosition)
 		assert.Error(t, err)
 	})
 }

 func TestSegment_deleteSegment(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()
 	collectionMeta := genCollectionMeta(collectionID, schema)
@@ -79,7 +71,7 @@ func TestSegment_deleteSegment(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -96,9 +88,6 @@ func TestSegment_deleteSegment(t *testing.T) {

 // -------------------------------------------------------------------------------------- stats functions
 func TestSegment_getRowCount(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()

@@ -106,7 +95,7 @@ func TestSegment_getRowCount(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -141,9 +130,6 @@ func TestSegment_getRowCount(t *testing.T) {
 }

 func TestSegment_retrieve(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()

@@ -151,7 +137,7 @@ func TestSegment_retrieve(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -228,9 +214,6 @@ func TestSegment_retrieve(t *testing.T) {
 }

 func TestSegment_getDeletedCount(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()

@@ -238,7 +221,7 @@ func TestSegment_getDeletedCount(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -280,9 +263,6 @@ func TestSegment_getDeletedCount(t *testing.T) {
 }

 func TestSegment_getMemSize(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()

@@ -290,7 +270,7 @@ func TestSegment_getMemSize(t *testing.T) {
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -319,16 +299,13 @@ func TestSegment_getMemSize(t *testing.T) {

 // -------------------------------------------------------------------------------------- dm & search functions
 func TestSegment_segmentInsert(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()

 	collection := newCollection(collectionID, schema)
 	assert.Equal(t, collection.ID(), collectionID)
 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -366,16 +343,13 @@ func TestSegment_segmentInsert(t *testing.T) {
 }

 func TestSegment_segmentDelete(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()
 	collection := newCollection(collectionID, schema)
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -464,16 +438,13 @@ func TestSegment_segmentSearch(t *testing.T) {

 // -------------------------------------------------------------------------------------- preDm functions
 func TestSegment_segmentPreInsert(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()
 	collection := newCollection(collectionID, schema)
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -486,16 +457,13 @@ func TestSegment_segmentPreInsert(t *testing.T) {
 }

 func TestSegment_segmentPreDelete(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	collectionID := UniqueID(0)
 	schema := genTestCollectionSchema()
 	collection := newCollection(collectionID, schema)
 	assert.Equal(t, collection.ID(), collectionID)

 	segmentID := UniqueID(0)
-	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition, pool)
+	segment, err := newSegment(collection, segmentID, defaultPartitionID, collectionID, "", segmentTypeGrowing, defaultSegmentVersion, defaultSegmentStartPosition)
 	assert.Equal(t, segmentID, segment.segmentID)
 	assert.Nil(t, err)

@@ -521,9 +489,6 @@ func TestSegment_segmentPreDelete(t *testing.T) {
 }

 func TestSegment_segmentLoadDeletedRecord(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	fieldParam := constFieldParam{
 		id: 100,
 		dataType: schemapb.DataType_Int64,
@@ -544,8 +509,7 @@ func TestSegment_segmentLoadDeletedRecord(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	assert.Nil(t, err)
 	ids := []int64{1, 2, 3}
 	pks := make([]primaryKey, 0)
@@ -614,9 +578,6 @@ func TestSegment_indexInfo(t *testing.T) {
 }

 func TestSegment_BasicMetrics(t *testing.T) {
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	schema := genTestCollectionSchema()
 	collection := newCollection(defaultCollectionID, schema)
 	segment, err := newSegment(collection,
@@ -626,8 +587,7 @@ func TestSegment_BasicMetrics(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	t.Run("test id binlog row size", func(t *testing.T) {
@@ -666,9 +626,6 @@ func TestSegment_BasicMetrics(t *testing.T) {
 func TestSegment_fillIndexedFieldsData(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	pool, err := concurrency.NewPool(runtime.GOMAXPROCS(0))
-	require.NoError(t, err)
-
 	schema := genTestCollectionSchema()
 	collection := newCollection(defaultCollectionID, schema)
 	segment, err := newSegment(collection,
@@ -678,8 +635,7 @@ func TestSegment_fillIndexedFieldsData(t *testing.T) {
 		defaultDMLChannel,
 		segmentTypeSealed,
 		defaultSegmentVersion,
-		defaultSegmentStartPosition,
-		pool)
+		defaultSegmentStartPosition)
 	assert.Nil(t, err)

 	vecCM, err := genVectorChunkManager(ctx, collection)
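Since no test in this file owns a pool anymore, a suite that still wants deterministic sizing would configure the shared pool once per binary, e.g. from TestMain. A sketch assuming the hypothetical initSegcoreThreadPool helper above; not part of this commit:

	func TestMain(m *testing.M) {
		// Size the shared segcore/knowhere thread pool once, instead of
		// constructing a concurrency.Pool in every test as before.
		initSegcoreThreadPool()
		os.Exit(m.Run())
	}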