fix: Fix dispatcher deregister and seek (#40860)
1. Fix dispatcher deregistration concurrency (keep the same logic as the 2.5 branch).
2. Fix seek when includeCurrentMsg is set (needed only by CDC, so there is no need to pick it to the 2.5 branch).

issue: https://github.com/milvus-io/milvus/issues/39862
pr: https://github.com/milvus-io/milvus/pull/39863

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
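The seek fix threads a new includeCurrentMsg argument from NewDispatcher down to stream.Seek (see the dispatcher hunks below). As a toy illustration of inclusive versus exclusive seek semantics, here is a self-contained Go sketch; the seek helper and the timestamps are invented for the example and are not Milvus APIs:

package main

import "fmt"

// seek models what a consumer receives after seeking to position pos:
// with includeCurrentMsg the message at pos itself is replayed, otherwise
// consumption starts strictly after it.
func seek(msgTimestamps []int, pos int, includeCurrentMsg bool) []int {
    out := make([]int, 0, len(msgTimestamps))
    for _, ts := range msgTimestamps {
        if ts > pos || (includeCurrentMsg && ts == pos) {
            out = append(out, ts)
        }
    }
    return out
}

func main() {
    msgs := []int{10, 20, 30, 40}
    fmt.Println(seek(msgs, 20, false)) // [30 40]    -> previous hard-coded behavior
    fmt.Println(seek(msgs, 20, true))  // [20 30 40] -> what a fresh CDC dispatcher needs
}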
parent 3ecacc4493
commit d8d1dcf076
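The concurrency fix replaces the separately maintained numConsumer/numActiveTarget atomics in dispatcherManager with a sync.RWMutex guarding mainDispatcher and deputyDispatchers, plus NumTarget()/NumConsumer() accessors that derive the counts under the lock (the first test hunks below switch to these accessors). A simplified, self-contained sketch of that pattern, with stand-in types rather than the real structs:

package main

import (
    "fmt"
    "sync"
)

type dispatcher struct{}

type manager struct {
    mu                sync.RWMutex
    mainDispatcher    *dispatcher
    deputyDispatchers map[int64]*dispatcher
}

// NumConsumer derives the consumer count under the read lock instead of
// keeping a separate atomic counter in sync, mirroring the accessor this
// commit adds to the DispatcherManager interface.
func (m *manager) NumConsumer() int {
    m.mu.RLock()
    defer m.mu.RUnlock()
    n := 0
    if m.mainDispatcher != nil {
        n++
    }
    return n + len(m.deputyDispatchers)
}

func main() {
    m := &manager{
        mainDispatcher:    &dispatcher{},
        deputyDispatchers: map[int64]*dispatcher{1: {}},
    }
    fmt.Println(m.NumConsumer()) // 2
}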
@@ -110,7 +110,7 @@ func TestClient_Concurrency(t *testing.T) {
     // Verify registered targets number.
     actual := 0
     c.managers.Range(func(pchannel string, manager DispatcherManager) bool {
-        actual += manager.(*dispatcherManager).registeredTargets.Len()
+        actual += manager.NumTarget()
         return true
     })
     assert.Equal(t, expected, actual)
@@ -120,7 +120,14 @@ func TestClient_Concurrency(t *testing.T) {
     actual = 0
     c.managers.Range(func(pchannel string, manager DispatcherManager) bool {
         m := manager.(*dispatcherManager)
-        actual += int(m.numActiveTarget.Load())
+        m.mu.RLock()
+        defer m.mu.RUnlock()
+        if m.mainDispatcher != nil {
+            actual += m.mainDispatcher.targets.Len()
+        }
+        for _, d := range m.deputyDispatchers {
+            actual += d.targets.Len()
+        }
         return true
     })
     t.Logf("expect = %d, actual = %d\n", expected, actual)
@@ -256,9 +263,9 @@ func (suite *SimulationSuite) TestMerge() {
     suite.Eventually(func() bool {
         for pchannel := range suite.pchannel2Producer {
             manager, ok := suite.client.(*client).managers.Get(pchannel)
-            suite.T().Logf("dispatcherNum = %d, pchannel = %s\n", manager.(*dispatcherManager).numConsumer.Load(), pchannel)
+            suite.T().Logf("dispatcherNum = %d, pchannel = %s\n", manager.NumConsumer(), pchannel)
             suite.True(ok)
-            if manager.(*dispatcherManager).numConsumer.Load() != 1 { // expected all merged, only mainDispatcher exist
+            if manager.NumConsumer() != 1 { // expected all merged, only mainDispatcher exist
                 return false
             }
         }
@@ -323,9 +330,9 @@ func (suite *SimulationSuite) TestSplit() {
     suite.Eventually(func() bool {
         for pchannel := range suite.pchannel2Producer {
             manager, ok := suite.client.(*client).managers.Get(pchannel)
-            suite.T().Logf("verifing dispatchers merged, dispatcherNum = %d, pchannel = %s\n", manager.(*dispatcherManager).numConsumer.Load(), pchannel)
+            suite.T().Logf("verifing dispatchers merged, dispatcherNum = %d, pchannel = %s\n", manager.NumConsumer(), pchannel)
             suite.True(ok)
-            if manager.(*dispatcherManager).numConsumer.Load() != 1 { // expected all merged, only mainDispatcher exist
+            if manager.NumConsumer() != 1 { // expected all merged, only mainDispatcher exist
                 return false
             }
         }
@@ -371,8 +378,8 @@ func (suite *SimulationSuite) TestSplit() {
             manager, ok := suite.client.(*client).managers.Get(pchannel)
             suite.True(ok)
             suite.T().Logf("verifing split, dispatcherNum = %d, splitNum+1 = %d, pchannel = %s\n",
-                manager.(*dispatcherManager).numConsumer.Load(), splitNumPerPchannel+1, pchannel)
-            if manager.(*dispatcherManager).numConsumer.Load() < 1 { // expected 1 mainDispatcher and 1 or more split deputyDispatchers
+                manager.NumConsumer(), splitNumPerPchannel+1, pchannel)
+            if manager.NumConsumer() < 1 { // expected 1 mainDispatcher and 1 or more split deputyDispatchers
                 return false
             }
         }
@@ -393,9 +400,9 @@ func (suite *SimulationSuite) TestSplit() {
     suite.Eventually(func() bool {
         for pchannel := range suite.pchannel2Producer {
             manager, ok := suite.client.(*client).managers.Get(pchannel)
-            suite.T().Logf("verifing dispatchers merged again, dispatcherNum = %d, pchannel = %s\n", manager.(*dispatcherManager).numConsumer.Load(), pchannel)
+            suite.T().Logf("verifing dispatchers merged again, dispatcherNum = %d, pchannel = %s\n", manager.NumConsumer(), pchannel)
             suite.True(ok)
-            if manager.(*dispatcherManager).numConsumer.Load() != 1 { // expected all merged, only mainDispatcher exist
+            if manager.NumConsumer() != 1 { // expected all merged, only mainDispatcher exist
                 return false
             }
         }
@@ -87,6 +87,7 @@ func NewDispatcher(
     pchannel string,
     position *Pos,
     subPos SubPos,
+    includeCurrentMsg bool,
     pullbackEndTs typeutil.Timestamp,
 ) (*Dispatcher, error) {
     subName := fmt.Sprintf("%s-%d-%d", pchannel, id, time.Now().UnixNano())
@@ -116,7 +117,7 @@ func NewDispatcher(
         return nil, err
     }
     log.Info("as consumer done", zap.Any("position", position))
-    err = stream.Seek(ctx, []*Pos{position}, false)
+    err = stream.Seek(ctx, []*Pos{position}, includeCurrentMsg)
     if err != nil {
         log.Error("seek failed", zap.Error(err))
         return nil, err
@@ -36,7 +36,7 @@ func TestDispatcher(t *testing.T) {
     ctx := context.Background()
     t.Run("test base", func(t *testing.T) {
         d, err := NewDispatcher(ctx, newMockFactory(), time.Now().UnixNano(), "mock_pchannel_0",
-            nil, common.SubscriptionPositionEarliest, 0)
+            nil, common.SubscriptionPositionEarliest, false, 0)
         assert.NoError(t, err)
         assert.NotPanics(t, func() {
             d.Handle(start)
@@ -65,7 +65,7 @@ func TestDispatcher(t *testing.T) {
            },
        }
        d, err := NewDispatcher(ctx, factory, time.Now().UnixNano(), "mock_pchannel_0",
-            nil, common.SubscriptionPositionEarliest, 0)
+            nil, common.SubscriptionPositionEarliest, false, 0)

        assert.Error(t, err)
        assert.Nil(t, d)
@@ -73,7 +73,7 @@ func TestDispatcher(t *testing.T) {

    t.Run("test target", func(t *testing.T) {
        d, err := NewDispatcher(ctx, newMockFactory(), time.Now().UnixNano(), "mock_pchannel_0",
-            nil, common.SubscriptionPositionEarliest, 0)
+            nil, common.SubscriptionPositionEarliest, false, 0)
        assert.NoError(t, err)
        output := make(chan *msgstream.MsgPack, 1024)

@@ -128,7 +128,7 @@ func TestDispatcher(t *testing.T) {

 func BenchmarkDispatcher_handle(b *testing.B) {
    d, err := NewDispatcher(context.Background(), newMockFactory(), time.Now().UnixNano(), "mock_pchannel_0",
-        nil, common.SubscriptionPositionEarliest, 0)
+        nil, common.SubscriptionPositionEarliest, false, 0)
    assert.NoError(b, err)

    for i := 0; i < b.N; i++ {
@@ -143,7 +143,7 @@ func BenchmarkDispatcher_handle(b *testing.B) {

 func TestGroupMessage(t *testing.T) {
    d, err := NewDispatcher(context.Background(), newMockFactory(), time.Now().UnixNano(), "mock_pchannel_0",
-        nil, common.SubscriptionPositionEarliest, 0)
+        nil, common.SubscriptionPositionEarliest, false, 0)
    assert.NoError(t, err)
    d.AddTarget(newTarget(&StreamConfig{VChannel: "mock_pchannel_0_1v0"}))
    d.AddTarget(newTarget(&StreamConfig{
@@ -40,6 +40,8 @@ import (
 type DispatcherManager interface {
    Add(ctx context.Context, streamConfig *StreamConfig) (<-chan *MsgPack, error)
    Remove(vchannel string)
+    NumTarget() int
+    NumConsumer() int
    Run()
    Close()
 }
@@ -53,9 +55,7 @@ type dispatcherManager struct {

    registeredTargets *typeutil.ConcurrentMap[string, *target]

-    numConsumer atomic.Int64
-    numActiveTarget atomic.Int64
+    mu sync.RWMutex

    mainDispatcher *Dispatcher
    deputyDispatchers map[int64]*Dispatcher // ID -> *Dispatcher
@@ -96,9 +96,26 @@ func (c *dispatcherManager) Remove(vchannel string) {
            zap.Int64("nodeID", c.nodeID), zap.String("vchannel", vchannel))
        return
    }
+    c.removeTargetFromDispatcher(t)
    t.close()
 }

+func (c *dispatcherManager) NumTarget() int {
+    return c.registeredTargets.Len()
+}
+
+func (c *dispatcherManager) NumConsumer() int {
+    c.mu.RLock()
+    defer c.mu.RUnlock()
+
+    numConsumer := 0
+    if c.mainDispatcher != nil {
+        numConsumer++
+    }
+    numConsumer += len(c.deputyDispatchers)
+    return numConsumer
+}
+
 func (c *dispatcherManager) Close() {
    c.closeOnce.Do(func() {
        c.closeChan <- struct{}{}
@@ -123,30 +140,46 @@ func (c *dispatcherManager) Run() {
            c.tryRemoveUnregisteredTargets()
            c.tryBuildDispatcher()
            c.tryMerge()
-            c.updateNumInfo()
        }
    }
 }

-func (c *dispatcherManager) updateNumInfo() {
-    numConsumer := 0
-    numActiveTarget := 0
+func (c *dispatcherManager) removeTargetFromDispatcher(t *target) {
+    log := log.With(zap.String("role", c.role), zap.Int64("nodeID", c.nodeID), zap.String("pchannel", c.pchannel))
+    c.mu.Lock()
+    defer c.mu.Unlock()
+    for _, dispatcher := range c.deputyDispatchers {
+        if dispatcher.HasTarget(t.vchannel) {
+            dispatcher.Handle(pause)
+            dispatcher.RemoveTarget(t.vchannel)
+            if dispatcher.TargetNum() == 0 {
+                dispatcher.Handle(terminate)
+                delete(c.deputyDispatchers, dispatcher.ID())
+                log.Info("remove deputy dispatcher done", zap.Int64("id", dispatcher.ID()))
+            } else {
+                dispatcher.Handle(resume)
+            }
+            t.close()
+        }
+    }
    if c.mainDispatcher != nil {
-        numConsumer++
-        numActiveTarget += c.mainDispatcher.TargetNum()
+        if c.mainDispatcher.HasTarget(t.vchannel) {
+            c.mainDispatcher.Handle(pause)
+            c.mainDispatcher.RemoveTarget(t.vchannel)
+            if c.mainDispatcher.TargetNum() == 0 && len(c.deputyDispatchers) == 0 {
+                c.mainDispatcher.Handle(terminate)
+                c.mainDispatcher = nil
+            } else {
+                c.mainDispatcher.Handle(resume)
+            }
+            t.close()
+        }
    }
-    numConsumer += len(c.deputyDispatchers)
-    c.numConsumer.Store(int64(numConsumer))
-
-    for _, d := range c.deputyDispatchers {
-        numActiveTarget += d.TargetNum()
-    }
-    c.numActiveTarget.Store(int64(numActiveTarget))
 }

 func (c *dispatcherManager) tryRemoveUnregisteredTargets() {
-    log := log.With(zap.String("role", c.role), zap.Int64("nodeID", c.nodeID), zap.String("pchannel", c.pchannel))
    unregisteredTargets := make([]*target, 0)
+    c.mu.RLock()
    for _, dispatcher := range c.deputyDispatchers {
        for _, t := range dispatcher.GetTargets() {
            if !c.registeredTargets.Contain(t.vchannel) {
@@ -161,36 +194,10 @@ func (c *dispatcherManager) tryRemoveUnregisteredTargets() {
            }
        }
    }
-    for _, dispatcher := range c.deputyDispatchers {
-        for _, t := range unregisteredTargets {
-            if dispatcher.HasTarget(t.vchannel) {
-                dispatcher.Handle(pause)
-                dispatcher.RemoveTarget(t.vchannel)
-                if dispatcher.TargetNum() == 0 {
-                    dispatcher.Handle(terminate)
-                    delete(c.deputyDispatchers, dispatcher.ID())
-                    log.Info("remove deputy dispatcher done", zap.Int64("id", dispatcher.ID()))
-                } else {
-                    dispatcher.Handle(resume)
-                }
-                t.close()
-            }
-        }
-    }
-    if c.mainDispatcher != nil {
-        for _, t := range unregisteredTargets {
-            if c.mainDispatcher.HasTarget(t.vchannel) {
-                c.mainDispatcher.Handle(pause)
-                c.mainDispatcher.RemoveTarget(t.vchannel)
-                if c.mainDispatcher.TargetNum() == 0 && len(c.deputyDispatchers) == 0 {
-                    c.mainDispatcher.Handle(terminate)
-                    c.mainDispatcher = nil
-                } else {
-                    c.mainDispatcher.Handle(resume)
-                }
-                t.close()
-            }
-        }
+    c.mu.RUnlock()
+
+    for _, t := range unregisteredTargets {
+        c.removeTargetFromDispatcher(t)
    }
 }

@@ -202,6 +209,7 @@ func (c *dispatcherManager) tryBuildDispatcher() {
    // get lack targets to perform subscription
    lackTargets := make([]*target, 0, len(allTargets))

+    c.mu.RLock()
 OUTER:
    for _, t := range allTargets {
        if c.mainDispatcher != nil && c.mainDispatcher.HasTarget(t.vchannel) {
@@ -214,6 +222,7 @@ OUTER:
        }
        lackTargets = append(lackTargets, t)
    }
+    c.mu.RUnlock()

    if len(lackTargets) == 0 {
        return
@@ -235,6 +244,19 @@ OUTER:
        }
    }

+    // For CDC, CDC needs to includeCurrentMsg when create new dispatcher
+    // and NOT includeCurrentMsg when create lag dispatcher. So if any dispatcher lagged,
+    // we give up batch subscription and create dispatcher for only one target.
+    includeCurrentMsg := false
+    for _, candidate := range candidateTargets {
+        if candidate.isLagged {
+            candidateTargets = []*target{candidate}
+            includeCurrentMsg = true
+            candidate.isLagged = false
+            break
+        }
+    }
+
    vchannels := lo.Map(candidateTargets, func(t *target, _ int) string {
        return t.vchannel
    })
@@ -247,7 +269,7 @@ OUTER:

    // TODO: add newDispatcher timeout param and init context
    id := c.idAllocator.Inc()
-    d, err := NewDispatcher(context.Background(), c.factory, id, c.pchannel, earliestTarget.pos, earliestTarget.subPos, latestTarget.pos.GetTimestamp())
+    d, err := NewDispatcher(context.Background(), c.factory, id, c.pchannel, earliestTarget.pos, earliestTarget.subPos, includeCurrentMsg, latestTarget.pos.GetTimestamp())
    if err != nil {
        panic(err)
    }
@@ -281,6 +303,21 @@ OUTER:
        zap.Strings("vchannels", vchannels),
    )

+    c.mu.Lock()
+    defer c.mu.Unlock()
+
+    d.Handle(pause)
+    for _, candidate := range candidateTargets {
+        vchannel := candidate.vchannel
+        t, ok := c.registeredTargets.Get(vchannel)
+        // During the build process, the target may undergo repeated deregister and register,
+        // causing the channel object to change. Here, validate whether the channel is the
+        // same as before the build. If inconsistent, remove the target.
+        if !ok || t.ch != candidate.ch {
+            d.RemoveTarget(vchannel)
+        }
+    }
+    d.Handle(resume)
    if c.mainDispatcher == nil {
        c.mainDispatcher = d
        log.Info("add main dispatcher", zap.Int64("id", d.ID()))
@@ -291,6 +328,9 @@ OUTER:
 }

 func (c *dispatcherManager) tryMerge() {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+
    start := time.Now()
    log := log.With(zap.String("role", c.role), zap.Int64("nodeID", c.nodeID), zap.String("pchannel", c.pchannel))

@@ -352,6 +392,9 @@ func (c *dispatcherManager) deleteMetric(channel string) {
 }

 func (c *dispatcherManager) uploadMetric() {
+    c.mu.RLock()
+    defer c.mu.RUnlock()
+
    nodeIDStr := fmt.Sprintf("%d", c.nodeID)
    fn := func(gauge *prometheus.GaugeVec) {
        if c.mainDispatcher == nil {
@@ -50,8 +50,8 @@ func TestManager(t *testing.T) {
        assert.NotNil(t, c)
        go c.Run()
        defer c.Close()
-        assert.Equal(t, int64(0), c.(*dispatcherManager).numConsumer.Load())
-        assert.Equal(t, 0, c.(*dispatcherManager).registeredTargets.Len())
+        assert.Equal(t, 0, c.NumConsumer())
+        assert.Equal(t, 0, c.NumTarget())

        var offset int
        for i := 0; i < 30; i++ {
@@ -64,8 +64,8 @@ func TestManager(t *testing.T) {
                assert.NoError(t, err)
            }
            assert.Eventually(t, func() bool {
-                t.Logf("offset=%d, numConsumer=%d, numTarget=%d", offset, c.(*dispatcherManager).numConsumer.Load(), c.(*dispatcherManager).registeredTargets.Len())
-                return c.(*dispatcherManager).registeredTargets.Len() == offset
+                t.Logf("offset=%d, numConsumer=%d, numTarget=%d", offset, c.NumConsumer(), c.NumTarget())
+                return c.NumTarget() == offset
            }, 3*time.Second, 10*time.Millisecond)
            for j := 0; j < rand.Intn(r); j++ {
                vchannel := fmt.Sprintf("%s_vchannelv%d", pchannel, offset)
@@ -74,8 +74,8 @@ func TestManager(t *testing.T) {
                offset--
            }
            assert.Eventually(t, func() bool {
-                t.Logf("offset=%d, numConsumer=%d, numTarget=%d", offset, c.(*dispatcherManager).numConsumer.Load(), c.(*dispatcherManager).registeredTargets.Len())
-                return c.(*dispatcherManager).registeredTargets.Len() == offset
+                t.Logf("offset=%d, numConsumer=%d, numTarget=%d", offset, c.NumConsumer(), c.NumTarget())
+                return c.NumTarget() == offset
            }, 3*time.Second, 10*time.Millisecond)
        }
    })
@@ -108,7 +108,7 @@ func TestManager(t *testing.T) {
        assert.NoError(t, err)
        o2, err := c.Add(ctx, NewStreamConfig(fmt.Sprintf("%s_vchannel-2", pchannel), nil, common.SubscriptionPositionUnknown))
        assert.NoError(t, err)
-        assert.Equal(t, 3, c.(*dispatcherManager).registeredTargets.Len())
+        assert.Equal(t, 3, c.NumTarget())

        consumeFn := func(output <-chan *MsgPack, done <-chan struct{}, wg *sync.WaitGroup) {
            defer wg.Done()
@@ -130,14 +130,14 @@ func TestManager(t *testing.T) {
        go consumeFn(o2, d2, wg)

        assert.Eventually(t, func() bool {
-            return c.(*dispatcherManager).numConsumer.Load() == 1 // expected merge
+            return c.NumConsumer() == 1 // expected merge
        }, 20*time.Second, 10*time.Millisecond)

        // stop consume vchannel_2 to trigger split
        d2 <- struct{}{}
        assert.Eventually(t, func() bool {
-            t.Logf("c.NumConsumer=%d", c.(*dispatcherManager).numConsumer.Load())
-            return c.(*dispatcherManager).numConsumer.Load() == 2 // expected split
+            t.Logf("c.NumConsumer=%d", c.NumConsumer())
+            return c.NumConsumer() == 2 // expected split
        }, 20*time.Second, 10*time.Millisecond)

        // stop all
@@ -169,9 +169,9 @@ func TestManager(t *testing.T) {
        assert.NoError(t, err)
        _, err = c.Add(ctx, NewStreamConfig(fmt.Sprintf("%s_vchannel-2", pchannel), nil, common.SubscriptionPositionUnknown))
        assert.NoError(t, err)
-        assert.Equal(t, 3, c.(*dispatcherManager).registeredTargets.Len())
+        assert.Equal(t, 3, c.NumTarget())
        assert.Eventually(t, func() bool {
-            return c.(*dispatcherManager).numConsumer.Load() >= 1
+            return c.NumConsumer() >= 1
        }, 3*time.Second, 10*time.Millisecond)
        c.(*dispatcherManager).mainDispatcher.curTs.Store(1000)
        for _, d := range c.(*dispatcherManager).deputyDispatchers {
@@ -183,9 +183,9 @@ func TestManager(t *testing.T) {
        defer paramtable.Get().Reset(checkIntervalK)

        assert.Eventually(t, func() bool {
-            return c.(*dispatcherManager).numConsumer.Load() == 1 // expected merged
+            return c.(*dispatcherManager).NumConsumer() == 1 // expected merged
        }, 3*time.Second, 10*time.Millisecond)
-        assert.Equal(t, 3, c.(*dispatcherManager).registeredTargets.Len())
+        assert.Equal(t, 3, c.NumTarget())
    })

    t.Run("test_repeated_vchannel", func(t *testing.T) {
@@ -220,7 +220,7 @@ func TestManager(t *testing.T) {
        assert.Error(t, err)

        assert.Eventually(t, func() bool {
-            return c.(*dispatcherManager).numConsumer.Load() >= 1
+            return c.NumConsumer() >= 1
        }, 3*time.Second, 10*time.Millisecond)
    })
 }
@@ -34,6 +34,7 @@ type target struct {
    ch     chan *MsgPack
    subPos SubPos
    pos    *Pos
+    isLagged bool

    closeMu   sync.Mutex
    closeOnce sync.Once
@@ -75,6 +76,7 @@ func (t *target) close() {
        t.closed = true
        t.timer.Stop()
        close(t.ch)
+        log.Info("close target chan", zap.String("vchannel", t.vchannel))
    })
 }

@@ -97,6 +99,7 @@ func (t *target) send(pack *MsgPack) error {
        log.Info("target closed", zap.String("vchannel", t.vchannel))
        return nil
    case <-t.timer.C:
+        t.isLagged = true
        return fmt.Errorf("send target timeout, vchannel=%s, timeout=%s, beginTs=%d, endTs=%d", t.vchannel, t.maxLag, pack.BeginTs, pack.EndTs)
    case t.ch <- pack:
        return nil
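The remaining hunks adjust the Go SDK end-to-end tests: each affected search/query now appends WithConsistencyLevel(entity.ClStrong) so the read is guaranteed to see the test's earlier inserts. A minimal sketch of that pattern, assuming the v2 Go client's milvusclient.New constructor and a placeholder address and collection name (only the option chain is taken from the diff):

package main

import (
    "context"
    "fmt"

    "github.com/milvus-io/milvus/client/v2/entity"
    "github.com/milvus-io/milvus/client/v2/milvusclient"
)

func main() {
    ctx := context.Background()
    // Address and collection name are placeholders for this example.
    mc, err := milvusclient.New(ctx, &milvusclient.ClientConfig{Address: "localhost:19530"})
    if err != nil {
        panic(err)
    }
    defer mc.Close(ctx)

    // Pin strong consistency on a single request so the query observes data
    // inserted earlier, as the test hunks below do.
    res, err := mc.Query(ctx, milvusclient.NewQueryOption("my_collection").
        WithFilter("id in [0, 1, 2]").
        WithConsistencyLevel(entity.ClStrong))
    if err != nil {
        panic(err)
    }
    fmt.Println(res.ResultCount)
}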
@@ -474,7 +474,7 @@ func TestSearchGroupByUnsupportedDataType(t *testing.T) {
        common.DefaultFloatFieldName, common.DefaultDoubleFieldName,
        common.DefaultJSONFieldName, common.DefaultFloatVecFieldName, common.DefaultInt8ArrayField, common.DefaultFloatArrayField,
    } {
-        _, err := mc.Search(ctx, client.NewSearchOption(collName, common.DefaultLimit, queryVec).WithGroupByField(unsupportedField).WithANNSField(common.DefaultFloatVecFieldName))
+        _, err := mc.Search(ctx, client.NewSearchOption(collName, common.DefaultLimit, queryVec).WithGroupByField(unsupportedField).WithANNSField(common.DefaultFloatVecFieldName).WithConsistencyLevel(entity.ClStrong))
        common.CheckErr(t, err, false, "unsupported data type")
    }
 }
@@ -495,7 +495,7 @@ func TestSearchGroupByRangeSearch(t *testing.T) {

    // range search
    _, err := mc.Search(ctx, client.NewSearchOption(collName, common.DefaultLimit, queryVec).WithGroupByField(common.DefaultVarcharFieldName).
-        WithANNSField(common.DefaultFloatVecFieldName).WithSearchParam("radius", "0").WithSearchParam("range_filter", "0.8"))
+        WithANNSField(common.DefaultFloatVecFieldName).WithSearchParam("radius", "0").WithSearchParam("range_filter", "0.8").WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, false, "Not allowed to do range-search when doing search-group-by")
 }

@@ -268,7 +268,7 @@ func TestHybridSearchMultiVectorsPagination(t *testing.T) {

    // offset 0, -1 -> 0
    for _, offset := range []int{0, -1} {
-        searchRes, err := mc.HybridSearch(ctx, client.NewHybridSearchOption(schema.CollectionName, common.DefaultLimit, annReqDef).WithOffset(offset))
+        searchRes, err := mc.HybridSearch(ctx, client.NewHybridSearchOption(schema.CollectionName, common.DefaultLimit, annReqDef).WithOffset(offset).WithConsistencyLevel(entity.ClStrong))
        common.CheckErr(t, err, true)
        common.CheckSearchResult(t, searchRes, common.DefaultNq, common.DefaultLimit)
    }
@@ -65,14 +65,14 @@ func TestQueryVarcharPkDefault(t *testing.T) {

    // query
    expr := fmt.Sprintf("%s in ['0', '1', '2', '3', '4']", common.DefaultVarcharFieldName)
-    queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr))
+    queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    common.CheckQueryResult(t, queryRes.Fields, []column.Column{insertRes.IDs.Slice(0, 5)})

    // get ids -> same result with query
    varcharValues := []string{"0", "1", "2", "3", "4"}
    ids := column.NewColumnVarChar(common.DefaultVarcharFieldName, varcharValues)
-    getRes, errGet := mc.Get(ctx, client.NewQueryOption(schema.CollectionName).WithIDs(ids))
+    getRes, errGet := mc.Get(ctx, client.NewQueryOption(schema.CollectionName).WithIDs(ids).WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, errGet, true)
    common.CheckQueryResult(t, getRes.Fields, []column.Column{insertRes.IDs.Slice(0, 5)})
 }
@@ -1094,12 +1094,12 @@ func TestQueryWithTemplateParam(t *testing.T) {
    }
    // default
    queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).
-        WithFilter(fmt.Sprintf("%s in {int64Values}", common.DefaultInt64FieldName)).WithTemplateParam("int64Values", int64Values))
+        WithFilter(fmt.Sprintf("%s in {int64Values}", common.DefaultInt64FieldName)).WithTemplateParam("int64Values", int64Values).WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    common.CheckQueryResult(t, queryRes.Fields, []column.Column{column.NewColumnInt64(common.DefaultInt64FieldName, int64Values)})

    // cover keys
-    res, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter("int64 < {k2}").WithTemplateParam("k2", 10).WithTemplateParam("k2", 5))
+    res, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter("int64 < {k2}").WithTemplateParam("k2", 10).WithTemplateParam("k2", 5).WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    require.Equal(t, 5, res.ResultCount)

@@ -1107,14 +1107,14 @@ func TestQueryWithTemplateParam(t *testing.T) {
    anyValues := []int64{0.0, 100.0, 10000.0}
    countRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).
        WithFilter(fmt.Sprintf("json_contains_any (%s, {any_values})", common.DefaultFloatArrayField)).WithTemplateParam("any_values", anyValues).
-        WithOutputFields(common.QueryCountFieldName))
+        WithOutputFields(common.QueryCountFieldName).WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    count, _ := countRes.Fields[0].GetAsInt64(0)
    require.EqualValues(t, 101, count)

    // dynamic
    countRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).
-        WithFilter("dynamicNumber % 2 == {v}").WithTemplateParam("v", 0).WithOutputFields(common.QueryCountFieldName))
+        WithFilter("dynamicNumber % 2 == {v}").WithTemplateParam("v", 0).WithOutputFields(common.QueryCountFieldName).WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    count, _ = countRes.Fields[0].GetAsInt64(0)
    require.EqualValues(t, 1500, count)
@@ -1123,7 +1123,8 @@ func TestQueryWithTemplateParam(t *testing.T) {
    countRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).
        WithFilter(fmt.Sprintf("%s['bool'] == {v}", common.DefaultJSONFieldName)).
        WithTemplateParam("v", false).
-        WithOutputFields(common.QueryCountFieldName))
+        WithOutputFields(common.QueryCountFieldName).
+        WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    count, _ = countRes.Fields[0].GetAsInt64(0)
    require.EqualValues(t, 1500/2, count)
@@ -1132,7 +1133,8 @@ func TestQueryWithTemplateParam(t *testing.T) {
    countRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).
        WithFilter(fmt.Sprintf("%s == {v}", common.DefaultBoolFieldName)).
        WithTemplateParam("v", true).
-        WithOutputFields(common.QueryCountFieldName))
+        WithOutputFields(common.QueryCountFieldName).
+        WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    count, _ = countRes.Fields[0].GetAsInt64(0)
    require.EqualValues(t, common.DefaultNb/2, count)
@@ -1141,7 +1143,8 @@ func TestQueryWithTemplateParam(t *testing.T) {
    res, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).
        WithFilter(fmt.Sprintf("%s >= {k1} && %s < {k2}", common.DefaultInt64FieldName, common.DefaultInt64FieldName)).
        WithTemplateParam("v", 0).WithTemplateParam("k1", 1000).
-        WithTemplateParam("k2", 2000))
+        WithTemplateParam("k2", 2000).
+        WithConsistencyLevel(entity.ClStrong))
    common.CheckErr(t, err, true)
    require.EqualValues(t, 1000, res.ResultCount)
 }