milvus/internal/datanode/io/io_pool_test.go
XuanYang-cn a33b68678d
enhance: [cherry-pick] Move compactor into sub package (#34098)
This PR consists of the following commits:

- enhance: Tidy compactor and remove dup codes (#32198)
- fix: Fix l0 compactor that may cause DN OOM (#33554)
- enhance: Add deltaRowCount in l0 compaction (#33997)
- enhance: enable stream writer in compactions (#32612)
- fix: turn on compression on stream writers (#34067)
- fix: adding blob memory size in binlog serde (#33324)

See also: #32451, #33547, #33998, #31679
pr: #32198, #33554, #33997, #32612

---------

Signed-off-by: yangxuan <xuan.yang@zilliz.com>
Signed-off-by: Ted Xu <ted.xu@zilliz.com>
Co-authored-by: Ted Xu <ted.xu@zilliz.com>
2024-06-25 11:16:02 +08:00


package io

import (
	"strconv"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/pkg/config"
	"github.com/milvus-io/milvus/pkg/util/conc"
	"github.com/milvus-io/milvus/pkg/util/hardware"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
)
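
// TestResizePools verifies that ResizeBFApplyPool resizes the bloom-filter
// apply pool when the parallel-factor config is updated, and that an invalid
// (zero) factor leaves the previous capacity in place.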
func TestResizePools(t *testing.T) {
	paramtable.Init()
	pt := paramtable.Get()
	defer func() {
		// Restore the DataNode factor that the subtest below overwrites.
		pt.Reset(pt.DataNodeCfg.BloomFilterApplyParallelFactor.Key)
	}()

	t.Run("BfApplyPool", func(t *testing.T) {
		expectedCap := hardware.GetCPUNum() * pt.DataNodeCfg.BloomFilterApplyParallelFactor.GetAsInt()

		// An update event with the default factor yields the expected capacity.
		ResizeBFApplyPool(&config.Event{
			HasUpdated: true,
		})
		assert.Equal(t, expectedCap, GetBFApplyPool().Cap())

		// Doubling the factor doubles the pool capacity.
		pt.Save(pt.DataNodeCfg.BloomFilterApplyParallelFactor.Key, strconv.FormatFloat(pt.DataNodeCfg.BloomFilterApplyParallelFactor.GetAsFloat()*2, 'f', 10, 64))
		ResizeBFApplyPool(&config.Event{
			HasUpdated: true,
		})
		assert.Equal(t, expectedCap*2, GetBFApplyPool().Cap())

		// A zero factor is rejected; the capacity stays unchanged.
		pt.Save(pt.DataNodeCfg.BloomFilterApplyParallelFactor.Key, "0")
		ResizeBFApplyPool(&config.Event{
			HasUpdated: true,
		})
		assert.Equal(t, expectedCap*2, GetBFApplyPool().Cap())
	})
}
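
// TestGetOrCreateIOPool checks that the lazily-created IO pool can be fetched
// and used concurrently from multiple goroutines without error.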
func TestGetOrCreateIOPool(t *testing.T) {
	paramtable.Init()
	ioConcurrency := paramtable.Get().DataNodeCfg.IOConcurrency.GetValue()
	paramtable.Get().Save(paramtable.Get().DataNodeCfg.IOConcurrency.Key, "64")
	defer func() { paramtable.Get().Save(paramtable.Get().DataNodeCfg.IOConcurrency.Key, ioConcurrency) }()

	nP := 10    // goroutines fetching the pool concurrently
	nTask := 10 // tasks submitted per goroutine
	wg := sync.WaitGroup{}
	for i := 0; i < nP; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			p := GetOrCreateIOPool()
			futures := make([]*conc.Future[any], 0, nTask)
			for j := 0; j < nTask; j++ {
				future := p.Submit(func() (interface{}, error) {
					return nil, nil
				})
				futures = append(futures, future)
			}
			err := conc.AwaitAll(futures...)
			assert.NoError(t, err)
		}()
	}
	wg.Wait()
}