Mirror of https://gitee.com/milvus-io/milvus.git, synced 2025-12-07 17:48:29 +08:00
enhance: Enhance import integration tests and logs (#42612)
1. Optimize the import process: skip subsequent steps and mark the task as complete if the number of imported rows is 0.
2. Improve import integration tests:
   a. Add a test to verify that autoIDs are not duplicated
   b. Add a test for the corner case where all data is deleted
   c. Shorten test execution time
3. Enhance import logging:
   a. Print imported segment information upon completion
   b. Include file name in failure logs

issue: https://github.com/milvus-io/milvus/issues/42488, https://github.com/milvus-io/milvus/issues/42518

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
parent 98067f5fc6
commit 86876682da
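The core behavioral change in item 1 is a zero-row short-circuit in the datacoord import checker: once every pre-import task has completed and the summed row count is zero, the job is marked Completed instead of moving on to the Importing and Stats stages. A minimal standalone sketch of that decision (hypothetical toy types, not the actual Milvus API; the real change is in the checkPreImportingJob hunk below):

```go
package main

import "fmt"

// preImportTask is a hypothetical stand-in for the real pre-import task state.
type preImportTask struct {
	completed bool
	totalRows int64
}

// nextJobState mirrors the short-circuit: wait while pre-import is unfinished,
// jump straight to Completed when there is nothing to import, otherwise proceed.
func nextJobState(tasks []preImportTask) string {
	total := int64(0)
	for _, t := range tasks {
		if !t.completed {
			return "PreImporting" // check again on the next tick
		}
		total += t.totalRows
	}
	if total == 0 {
		return "Completed" // skip the Importing/Stats stages entirely
	}
	return "Importing"
}

func main() {
	fmt.Println(nextJobState([]preImportTask{{completed: true, totalRows: 0}}))  // Completed
	fmt.Println(nextJobState([]preImportTask{{completed: true, totalRows: 42}})) // Importing
}
```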
@@ -194,10 +194,6 @@ func (c *importChecker) getLackFilesForImports(job ImportJob) []*datapb.ImportFi
 	preimports := c.importMeta.GetTaskBy(c.ctx, WithType(PreImportTaskType), WithJob(job.GetJobID()))
 	lacks := make(map[int64]*datapb.ImportFileStats, 0)
 	for _, t := range preimports {
-		if t.GetState() != datapb.ImportTaskStateV2_Completed {
-			// Preimport tasks are not fully completed, thus generating imports should not be triggered.
-			return nil
-		}
 		for _, stat := range t.GetFileStats() {
 			lacks[stat.GetImportFile().GetId()] = stat
 		}
@@ -245,6 +241,37 @@ func (c *importChecker) checkPendingJob(job ImportJob) {
 
 func (c *importChecker) checkPreImportingJob(job ImportJob) {
 	log := log.With(zap.Int64("jobID", job.GetJobID()))
+
+	preimports := c.importMeta.GetTaskBy(c.ctx, WithType(PreImportTaskType), WithJob(job.GetJobID()))
+	totalRows := int64(0)
+	for _, t := range preimports {
+		if t.GetState() != datapb.ImportTaskStateV2_Completed {
+			// Preimport tasks are not fully completed, thus generating imports should not be triggered.
+			return
+		}
+		totalRows += lo.SumBy(t.GetFileStats(), func(stat *datapb.ImportFileStats) int64 {
+			return stat.GetTotalRows()
+		})
+	}
+
+	updateJobState := func(state internalpb.ImportJobState, actions ...UpdateJobAction) {
+		actions = append(actions, UpdateJobState(state))
+		err := c.importMeta.UpdateJob(c.ctx, job.GetJobID(), actions...)
+		if err != nil {
+			log.Warn("failed to update job state to Importing", zap.Error(err))
+			return
+		}
+		preImportDuration := job.GetTR().RecordSpan()
+		metrics.ImportJobLatency.WithLabelValues(metrics.ImportStagePreImport).Observe(float64(preImportDuration.Milliseconds()))
+		log.Info("import job preimport done", zap.String("state", state.String()), zap.Duration("jobTimeCost/preimport", preImportDuration))
+	}
+
+	if totalRows == 0 {
+		log.Info("no data to import, skip the subsequent stages, just update job state to Completed")
+		updateJobState(internalpb.ImportJobState_Completed)
+		return
+	}
+
 	lacks := c.getLackFilesForImports(job)
 	if len(lacks) == 0 {
 		return
@@ -253,10 +280,7 @@ func (c *importChecker) checkPreImportingJob(job ImportJob) {
 	requestSize, err := CheckDiskQuota(c.ctx, job, c.meta, c.importMeta)
 	if err != nil {
 		log.Warn("import failed, disk quota exceeded", zap.Error(err))
-		err = c.importMeta.UpdateJob(c.ctx, job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed), UpdateJobReason(err.Error()))
-		if err != nil {
-			log.Warn("failed to update job state to Failed", zap.Error(err))
-		}
+		updateJobState(internalpb.ImportJobState_Failed, UpdateJobReason(err.Error()))
 		return
 	}
 
@@ -271,23 +295,13 @@ func (c *importChecker) checkPreImportingJob(job ImportJob) {
 		err = c.importMeta.AddTask(c.ctx, t)
 		if err != nil {
 			log.Warn("add new import task failed", WrapTaskLog(t, zap.Error(err))...)
-			updateErr := c.importMeta.UpdateJob(c.ctx, job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Failed), UpdateJobReason(err.Error()))
-			if updateErr != nil {
-				log.Warn("failed to update job state to Failed", zap.Error(updateErr))
-			}
+			updateJobState(internalpb.ImportJobState_Failed, UpdateJobReason(err.Error()))
 			return
 		}
 		log.Info("add new import task", WrapTaskLog(t)...)
 	}
 
-	err = c.importMeta.UpdateJob(c.ctx, job.GetJobID(), UpdateJobState(internalpb.ImportJobState_Importing), UpdateRequestedDiskSize(requestSize))
-	if err != nil {
-		log.Warn("failed to update job state to Importing", zap.Error(err))
-		return
-	}
-	preImportDuration := job.GetTR().RecordSpan()
-	metrics.ImportJobLatency.WithLabelValues(metrics.ImportStagePreImport).Observe(float64(preImportDuration.Milliseconds()))
-	log.Info("import job preimport done", zap.Duration("jobTimeCost/preimport", preImportDuration))
+	updateJobState(internalpb.ImportJobState_Importing, UpdateRequestedDiskSize(requestSize))
 }
 
 func (c *importChecker) checkImportingJob(job ImportJob) {
@@ -318,7 +332,7 @@ func (c *importChecker) checkStatsJob(job ImportJob) {
 		}
 		statsDuration := job.GetTR().RecordSpan()
 		metrics.ImportJobLatency.WithLabelValues(metrics.ImportStageStats).Observe(float64(statsDuration.Milliseconds()))
-		log.Info("import job stats done", zap.Duration("jobTimeCost/stats", statsDuration))
+		log.Info("import job stats done", zap.String("state", state.String()), zap.Duration("jobTimeCost/stats", statsDuration))
 	}
 
 	// Skip stats stage if not enable stats or is l0 import.
@@ -186,9 +186,15 @@ func (s *ImportCheckerSuite) TestCheckJob() {
 	s.Equal(internalpb.ImportJobState_PreImporting, s.importMeta.GetJob(context.TODO(), job.GetJobID()).GetState())
 
 	// test checkPreImportingJob
+	fileStats := []*datapb.ImportFileStats{
+		{
+			TotalRows: 100,
+		},
+	}
 	catalog.EXPECT().SaveImportTask(mock.Anything, mock.Anything).Return(nil)
 	for _, t := range preimportTasks {
-		err := s.importMeta.UpdateTask(context.TODO(), t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
+		err := s.importMeta.UpdateTask(context.TODO(), t.GetTaskID(),
+			UpdateState(datapb.ImportTaskStateV2_Completed), UpdateFileStats(fileStats))
 		s.NoError(err)
 	}
 
@@ -303,8 +309,14 @@ func (s *ImportCheckerSuite) TestCheckJob_Failed() {
 	s.Equal(internalpb.ImportJobState_PreImporting, s.importMeta.GetJob(context.TODO(), job.GetJobID()).GetState())
 
 	// test checkPreImportingJob
+	fileStats := []*datapb.ImportFileStats{
+		{
+			TotalRows: 100,
+		},
+	}
 	for _, t := range preimportTasks {
-		err := s.importMeta.UpdateTask(context.TODO(), t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
+		err := s.importMeta.UpdateTask(context.TODO(), t.GetTaskID(),
+			UpdateState(datapb.ImportTaskStateV2_Completed), UpdateFileStats(fileStats))
 		s.NoError(err)
 	}
 
@@ -688,8 +700,14 @@ func TestImportCheckerCompaction(t *testing.T) {
 	catalog.EXPECT().SavePreImportTask(mock.Anything, mock.Anything).Return(nil).Twice()
 	catalog.EXPECT().SaveImportJob(mock.Anything, mock.Anything).Return(nil).Once()
 	preimportTasks := importMeta.GetTaskBy(context.TODO(), WithJob(job.GetJobID()), WithType(PreImportTaskType))
+	fileStats := []*datapb.ImportFileStats{
+		{
+			TotalRows: 100,
+		},
+	}
 	for _, pt := range preimportTasks {
-		err := importMeta.UpdateTask(context.TODO(), pt.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Completed))
+		err := importMeta.UpdateTask(context.TODO(), pt.GetTaskID(),
+			UpdateState(datapb.ImportTaskStateV2_Completed), UpdateFileStats(fileStats))
 		assert.NoError(t, err)
 	}
 	assert.Eventually(t, func() bool {
@@ -18,6 +18,7 @@ package importv2
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"time"
 
@@ -139,7 +140,7 @@ func (t *ImportTask) Clone() Task {
 }
 
 func (t *ImportTask) Execute() []*conc.Future[any] {
-	bufferSize := paramtable.Get().DataNodeCfg.ImportInsertBufferSize.GetAsInt() * 1024 * 1024
+	bufferSize := paramtable.Get().DataNodeCfg.ImportInsertBufferSize.GetAsInt()
 	log.Info("start to import", WrapLogFields(t,
 		zap.Int("bufferSize", bufferSize),
 		zap.Any("schema", t.GetSchema()))...)
@@ -151,7 +152,8 @@ func (t *ImportTask) Execute() []*conc.Future[any] {
 		reader, err := importutilv2.NewReader(t.ctx, t.cm, t.GetSchema(), file, req.GetOptions(), bufferSize)
 		if err != nil {
 			log.Warn("new reader failed", WrapLogFields(t, zap.String("file", file.String()), zap.Error(err))...)
-			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(err.Error()))
+			reason := fmt.Sprintf("error: %v, file: %s", err, file.String())
+			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(reason))
 			return err
 		}
 		defer reader.Close()
@@ -159,7 +161,8 @@ func (t *ImportTask) Execute() []*conc.Future[any] {
 		err = t.importFile(reader)
 		if err != nil {
 			log.Warn("do import failed", WrapLogFields(t, zap.String("file", file.String()), zap.Error(err))...)
-			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(err.Error()))
+			reason := fmt.Sprintf("error: %v, file: %s", err, file.String())
+			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(reason))
 			return err
 		}
 		log.Info("import file done", WrapLogFields(t, zap.Strings("files", file.GetPaths()),
@@ -127,7 +127,7 @@ func (t *L0ImportTask) Clone() Task {
 }
 
 func (t *L0ImportTask) Execute() []*conc.Future[any] {
-	bufferSize := paramtable.Get().DataNodeCfg.ImportDeleteBufferSize.GetAsInt() * 1024 * 1024
+	bufferSize := paramtable.Get().DataNodeCfg.ImportDeleteBufferSize.GetAsInt()
 	log.Info("start to import l0", WrapLogFields(t,
 		zap.Int("bufferSize", bufferSize),
 		zap.Any("schema", t.GetSchema()))...)
@@ -136,8 +136,12 @@ func (t *L0ImportTask) Execute() []*conc.Future[any] {
 	fn := func() (err error) {
 		defer func() {
 			if err != nil {
-				log.Warn("l0 import task execute failed", WrapLogFields(t, zap.Error(err))...)
-				t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(err.Error()))
+				var reason string = err.Error()
+				if len(t.req.GetFiles()) == 1 {
+					reason = fmt.Sprintf("error: %v, file: %s", err, t.req.GetFiles()[0].String())
+				}
+				log.Warn("l0 import task execute failed", WrapLogFields(t, zap.Any("file", t.req.GetFiles()), zap.String("err", reason))...)
+				t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(reason))
 			}
 		}()
 
@@ -115,7 +115,7 @@ func (t *L0PreImportTask) Clone() Task {
 }
 
 func (t *L0PreImportTask) Execute() []*conc.Future[any] {
-	bufferSize := paramtable.Get().DataNodeCfg.ImportDeleteBufferSize.GetAsInt() * 1024 * 1024
+	bufferSize := paramtable.Get().DataNodeCfg.ImportDeleteBufferSize.GetAsInt()
 	log.Info("start to preimport l0", WrapLogFields(t,
 		zap.Int("bufferSize", bufferSize),
 		zap.Any("schema", t.GetSchema()))...)
@@ -124,8 +124,12 @@ func (t *L0PreImportTask) Execute() []*conc.Future[any] {
 	fn := func() (err error) {
 		defer func() {
 			if err != nil {
-				log.Warn("l0 import task execute failed", WrapLogFields(t, zap.Error(err))...)
-				t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(err.Error()))
+				var reason string = err.Error()
+				if len(t.GetFileStats()) == 1 {
+					reason = fmt.Sprintf("error: %v, file: %s", err, t.GetFileStats()[0].GetImportFile().String())
+				}
+				log.Warn("l0 import task execute failed", WrapLogFields(t, zap.String("err", reason))...)
+				t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(reason))
 			}
 		}()
 
@@ -124,7 +124,7 @@ func (t *PreImportTask) Clone() Task {
 }
 
 func (t *PreImportTask) Execute() []*conc.Future[any] {
-	bufferSize := paramtable.Get().DataNodeCfg.ImportInsertBufferSize.GetAsInt() * 1024 * 1024
+	bufferSize := paramtable.Get().DataNodeCfg.ImportInsertBufferSize.GetAsInt()
 	log.Info("start to preimport", WrapLogFields(t,
 		zap.Int("bufferSize", bufferSize),
 		zap.Any("schema", t.GetSchema()))...)
@@ -138,7 +138,8 @@ func (t *PreImportTask) Execute() []*conc.Future[any] {
 		reader, err := importutilv2.NewReader(t.ctx, t.cm, t.GetSchema(), file, t.options, bufferSize)
 		if err != nil {
 			log.Warn("new reader failed", WrapLogFields(t, zap.String("file", file.String()), zap.Error(err))...)
-			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(err.Error()))
+			reason := fmt.Sprintf("error: %v, file: %s", err, file.String())
+			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(reason))
 			return err
 		}
 		defer reader.Close()
@@ -146,7 +147,8 @@ func (t *PreImportTask) Execute() []*conc.Future[any] {
 		err = t.readFileStat(reader, i)
 		if err != nil {
 			log.Warn("preimport failed", WrapLogFields(t, zap.String("file", file.String()), zap.Error(err))...)
-			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(err.Error()))
+			reason := fmt.Sprintf("error: %v, file: %s", err, file.String())
+			t.manager.Update(t.GetTaskID(), UpdateState(datapb.ImportTaskStateV2_Failed), UpdateReason(reason))
 			return err
 		}
 		log.Info("read file stat done", WrapLogFields(t, zap.Strings("files", file.GetPaths()),
@@ -5477,6 +5477,10 @@ if this parameter <= 0, will set it as 10`,
 		Version: "2.4.0",
 		Doc: "The insert buffer size (in MB) during import.",
 		DefaultValue: "64",
+		Formatter: func(v string) string {
+			bufferSize := getAsFloat(v)
+			return fmt.Sprintf("%d", int(megaBytes2Bytes(bufferSize)))
+		},
 		PanicIfEmpty: false,
 		Export: true,
 	}
@@ -5487,6 +5491,10 @@ if this parameter <= 0, will set it as 10`,
 		Version: "2.5.14",
 		Doc: "The delete buffer size (in MB) during import.",
 		DefaultValue: "16",
+		Formatter: func(v string) string {
+			bufferSize := getAsFloat(v)
+			return fmt.Sprintf("%d", int(megaBytes2Bytes(bufferSize)))
+		},
 		PanicIfEmpty: false,
 		Export: true,
 	}
@@ -611,8 +611,8 @@ func TestComponentParam(t *testing.T) {
 		t.Logf("maxConcurrentImportTaskNum: %d", maxConcurrentImportTaskNum)
 		assert.Equal(t, 16, maxConcurrentImportTaskNum)
 		assert.Equal(t, int64(16), Params.MaxImportFileSizeInGB.GetAsInt64())
-		assert.Equal(t, 64, Params.ImportInsertBufferSize.GetAsInt())
-		assert.Equal(t, 16, Params.ImportDeleteBufferSize.GetAsInt())
+		assert.Equal(t, 64*1024*1024, Params.ImportInsertBufferSize.GetAsInt())
+		assert.Equal(t, 16*1024*1024, Params.ImportDeleteBufferSize.GetAsInt())
 		assert.Equal(t, 16, Params.MaxTaskSlotNum.GetAsInt())
 		params.Save("datanode.gracefulStopTimeout", "100")
 		assert.Equal(t, 100*time.Second, Params.GracefulStopTimeout.GetAsDuration(time.Second))
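The two Formatter additions above change the read-back unit of these knobs: ImportInsertBufferSize and ImportDeleteBufferSize are still configured in MB, but reading them now yields bytes, which is why the task code earlier drops the manual `* 1024 * 1024` and the test asserts `64*1024*1024` and `16*1024*1024`. A small self-contained sketch of the same MB-to-bytes formatting idea (hypothetical helper, not the actual paramtable API):

```go
package main

import (
	"fmt"
	"strconv"
)

// formatMBToBytes is a hypothetical stand-in for the formatter: the raw value is
// interpreted as megabytes and rewritten as a byte count, so readers get bytes.
func formatMBToBytes(v string) string {
	mb, err := strconv.ParseFloat(v, 64)
	if err != nil {
		return v // fall back to the raw value; the real formatter may behave differently
	}
	return strconv.FormatInt(int64(mb*1024*1024), 10)
}

func main() {
	fmt.Println(formatMBToBytes("64")) // 67108864 bytes
	fmt.Println(formatMBToBytes("16")) // 16777216 bytes
}
```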
tests/integration/import/auto_id_test.go (new file, 195 lines added)
@@ -0,0 +1,195 @@
+// Licensed to the LF AI & Data foundation under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package importv2
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"os"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
+	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
+	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
+	"github.com/milvus-io/milvus/pkg/v2/common"
+	"github.com/milvus-io/milvus/pkg/v2/log"
+	"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
+	"github.com/milvus-io/milvus/pkg/v2/util/funcutil"
+	"github.com/milvus-io/milvus/pkg/v2/util/merr"
+	"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
+	"github.com/milvus-io/milvus/tests/integration"
+)
+
+func (s *BulkInsertSuite) runTestAutoID() {
+	const (
+		rowCount = 10
+		fileNum  = 10
+	)
+
+	c := s.Cluster
+	ctx, cancel := context.WithTimeout(c.GetContext(), 240*time.Second)
+	defer cancel()
+
+	collectionName := "TestBulkInsert" + funcutil.GenRandomStr()
+
+	var schema *schemapb.CollectionSchema
+	fieldSchema1 := &schemapb.FieldSchema{FieldID: 100, Name: "id", DataType: s.pkType, TypeParams: []*commonpb.KeyValuePair{{Key: common.MaxLengthKey, Value: "128"}}, IsPrimaryKey: true, AutoID: true}
+	fieldSchema2 := &schemapb.FieldSchema{FieldID: 101, Name: "image_path", DataType: schemapb.DataType_VarChar, TypeParams: []*commonpb.KeyValuePair{{Key: common.MaxLengthKey, Value: "65535"}}}
+	fieldSchema3 := &schemapb.FieldSchema{FieldID: 102, Name: "embeddings", DataType: s.vecType, TypeParams: []*commonpb.KeyValuePair{{Key: common.DimKey, Value: "128"}}}
+	schema = integration.ConstructSchema(collectionName, dim, true, fieldSchema1, fieldSchema2, fieldSchema3)
+
+	marshaledSchema, err := proto.Marshal(schema)
+	s.NoError(err)
+
+	createCollectionStatus, err := c.Proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
+		CollectionName: collectionName,
+		Schema:         marshaledSchema,
+		ShardsNum:      common.DefaultShardsNum,
+	})
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, createCollectionStatus.GetErrorCode())
+
+	err = os.MkdirAll(c.ChunkManager.RootPath(), os.ModePerm)
+	s.NoError(err)
+
+	wg := &sync.WaitGroup{}
+	importReqs := make([]*internalpb.ImportRequest, fileNum)
+	for i := 0; i < fileNum; i++ {
+		wg.Add(1)
+		i := i
+		go func() {
+			defer wg.Done()
+			rowBasedFile := fmt.Sprintf("%s/test_%d_%d.json", c.ChunkManager.RootPath(), i, rand.Int())
+			GenerateJSONFile(s.T(), rowBasedFile, schema, rowCount)
+			files := []*internalpb.ImportFile{
+				{
+					Paths: []string{
+						rowBasedFile,
+					},
+				},
+			}
+			importReqs[i] = &internalpb.ImportRequest{
+				CollectionName: collectionName,
+				Files:          files,
+				Options:        []*commonpb.KeyValuePair{},
+			}
+		}()
+	}
+	defer func() {
+		for _, req := range importReqs {
+			os.Remove(req.GetFiles()[0].GetPaths()[0])
+		}
+	}()
+	wg.Wait()
+
+	for i := 0; i < fileNum; i++ {
+		wg.Add(1)
+		i := i
+		go func() {
+			defer wg.Done()
+			importResp, err := c.Proxy.ImportV2(ctx, importReqs[i])
+			s.NoError(err)
+			s.Equal(int32(0), importResp.GetStatus().GetCode())
+			log.Info("Import result", zap.Any("importResp", importResp))
+			err = WaitForImportDone(ctx, c, importResp.GetJobID())
+			s.NoError(err)
+		}()
+	}
+	wg.Wait()
+
+	segments, err := c.MetaWatcher.ShowSegments()
+	s.NoError(err)
+	s.NotEmpty(segments)
+	for _, segment := range segments {
+		s.True(len(segment.GetBinlogs()) > 0)
+		s.NoError(CheckLogID(segment.GetBinlogs()))
+		s.True(len(segment.GetDeltalogs()) == 0)
+		s.True(len(segment.GetStatslogs()) > 0)
+		s.NoError(CheckLogID(segment.GetStatslogs()))
+	}
+
+	// create index
+	createIndexStatus, err := c.Proxy.CreateIndex(ctx, &milvuspb.CreateIndexRequest{
+		CollectionName: collectionName,
+		FieldName:      "embeddings",
+		IndexName:      "_default",
+		ExtraParams:    integration.ConstructIndexParam(dim, s.indexType, s.metricType),
+	})
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, createIndexStatus.GetErrorCode())
+	s.WaitForIndexBuilt(ctx, collectionName, "embeddings")
+
+	// load
+	loadStatus, err := c.Proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
+		CollectionName: collectionName,
+	})
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, loadStatus.GetErrorCode())
+	s.WaitForLoad(ctx, collectionName)
+
+	// search
+	expr := ""
+	const (
+		nq           = 2
+		topk         = 2
+		roundDecimal = -1
+	)
+	params := integration.GetSearchParams(s.indexType, s.metricType)
+	searchReq := integration.ConstructSearchRequest("", collectionName, expr,
+		"embeddings", s.vecType, nil, s.metricType, params, nq, dim, topk, roundDecimal)
+	searchReq.ConsistencyLevel = commonpb.ConsistencyLevel_Eventually
+	searchResult, err := c.Proxy.Search(ctx, searchReq)
+	s.NoError(err)
+	s.Equal(commonpb.ErrorCode_Success, searchResult.GetStatus().GetErrorCode())
+	s.Equal(nq*topk, len(searchResult.GetResults().GetScores()))
+
+	// verify no duplicate autoID
+	expr = "id >= 0"
+	if s.pkType == schemapb.DataType_VarChar {
+		expr = `id >= "0"`
+	}
+	queryResult, err := c.Proxy.Query(ctx, &milvuspb.QueryRequest{
+		CollectionName:   collectionName,
+		Expr:             expr,
+		OutputFields:     []string{"id"},
+		ConsistencyLevel: commonpb.ConsistencyLevel_Eventually,
+	})
+	err = merr.CheckRPCCall(queryResult, err)
+	s.NoError(err)
+	count := len(queryResult.GetFieldsData()[0].GetScalars().GetLongData().GetData())
+	if s.pkType == schemapb.DataType_VarChar {
+		count = len(queryResult.GetFieldsData()[0].GetScalars().GetStringData().GetData())
+	}
+	s.Equal(rowCount*fileNum, count)
+}
+
+func (s *BulkInsertSuite) TestAutoID() {
+	// make buffer size small to trigger multiple sync
+	paramtable.Get().Save(paramtable.Get().DataNodeCfg.ImportInsertBufferSize.Key, "0.000001")
+	defer paramtable.Get().Reset(paramtable.Get().DataNodeCfg.ImportInsertBufferSize.Key)
+
+	s.pkType = schemapb.DataType_Int64
+	s.runTestAutoID()
+
+	s.pkType = schemapb.DataType_VarChar
+	s.runTestAutoID()
+}
@@ -19,6 +19,7 @@ package importv2
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"
 
 	"github.com/samber/lo"
@@ -39,7 +40,12 @@ import (
 	"github.com/milvus-io/milvus/tests/integration"
 )
 
-func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int) (int64, int64, *schemapb.IDs) {
+type DMLGroup struct {
+	insertRowNums []int
+	deleteRowNums []int
+}
+
+func (s *BulkInsertSuite) PrepareCollectionA(dim int, dmlGroup *DMLGroup) (int64, int64, *schemapb.IDs) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10)
 	defer cancel()
 	c := s.Cluster
@@ -51,10 +57,9 @@ func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int)
 	s.NoError(err)
 
 	createCollectionStatus, err := c.Proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
 		CollectionName: collectionName,
 		Schema:         marshaledSchema,
 		ShardsNum:      common.DefaultShardsNum,
-		ConsistencyLevel: commonpb.ConsistencyLevel_Strong,
 	})
 	s.NoError(merr.CheckRPCCall(createCollectionStatus, err))
 
@@ -76,7 +81,6 @@ func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int)
 		ExtraParams: integration.ConstructIndexParam(dim, integration.IndexFaissIvfFlat, metric.L2),
 	})
 	s.NoError(merr.CheckRPCCall(createIndexStatus, err))
-
 	s.WaitForIndexBuilt(ctx, collectionName, integration.FloatVecField)
 
 	// load
@@ -86,67 +90,85 @@ func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int)
 	s.NoError(merr.CheckRPCCall(loadStatus, err))
 	s.WaitForLoad(ctx, collectionName)
 
-	fVecColumn := integration.NewFloatVectorFieldData(integration.FloatVecField, rowNum, dim)
-	hashKeys := integration.GenerateHashKeys(rowNum)
-	insertResult, err := c.Proxy.Insert(ctx, &milvuspb.InsertRequest{
-		CollectionName: collectionName,
-		FieldsData:     []*schemapb.FieldData{fVecColumn},
-		HashKeys:       hashKeys,
-		NumRows:        uint32(rowNum),
-	})
-	s.NoError(merr.CheckRPCCall(insertResult, err))
-	insertedIDs := insertResult.GetIDs()
-
-	// flush
-	flushResp, err := c.Proxy.Flush(ctx, &milvuspb.FlushRequest{
-		CollectionNames: []string{collectionName},
-	})
-	s.NoError(merr.CheckRPCCall(flushResp, err))
-	segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
-	ids := segmentIDs.GetData()
-	s.Require().NotEmpty(segmentIDs)
-	s.Require().True(has)
-	flushTs, has := flushResp.GetCollFlushTs()[collectionName]
-	s.True(has)
-
-	s.WaitForFlush(ctx, ids, flushTs, "", collectionName)
-	segments, err := c.MetaWatcher.ShowSegments()
-	s.NoError(err)
-	s.NotEmpty(segments)
-	for _, segment := range segments {
-		log.Info("ShowSegments result", zap.String("segment", segment.String()))
-	}
-
-	// delete
-	beginIndex := 0
-	for i := 0; i < delBatch; i++ {
-		delCnt := delNum / delBatch
-		idBegin := insertedIDs.GetIntId().GetData()[beginIndex]
-		idEnd := insertedIDs.GetIntId().GetData()[beginIndex+delCnt]
-		deleteResult, err := c.Proxy.Delete(ctx, &milvuspb.DeleteRequest{
-			CollectionName: collectionName,
-			Expr:           fmt.Sprintf("%d <= %s < %d", idBegin, integration.Int64Field, idEnd),
-		})
-		s.NoError(merr.CheckRPCCall(deleteResult, err))
-		beginIndex += delCnt
-
-		flushResp, err = c.Proxy.Flush(ctx, &milvuspb.FlushRequest{
-			CollectionNames: []string{collectionName},
-		})
-		s.NoError(merr.CheckRPCCall(flushResp, err))
-		flushTs, has = flushResp.GetCollFlushTs()[collectionName]
-		s.True(has)
-		s.WaitForFlush(ctx, nil, flushTs, "", collectionName)
-	}
+	const delBatch = 2
+	var (
+		totalInsertRowNum = 0
+		totalDeleteRowNum = 0
+		totalInsertedIDs  = &schemapb.IDs{
+			IdField: &schemapb.IDs_IntId{
+				IntId: &schemapb.LongArray{
+					Data: make([]int64, 0),
+				},
+			},
+		}
+	)
+
+	for i := range dmlGroup.insertRowNums {
+		insRow := dmlGroup.insertRowNums[i]
+		delRow := dmlGroup.deleteRowNums[i]
+		totalInsertRowNum += insRow
+		totalDeleteRowNum += delRow
+
+		fVecColumn := integration.NewFloatVectorFieldData(integration.FloatVecField, insRow, dim)
+		hashKeys := integration.GenerateHashKeys(insRow)
+		insertResult, err := c.Proxy.Insert(ctx, &milvuspb.InsertRequest{
+			CollectionName: collectionName,
+			FieldsData:     []*schemapb.FieldData{fVecColumn},
+			HashKeys:       hashKeys,
+			NumRows:        uint32(insRow),
+		})
+		s.NoError(merr.CheckRPCCall(insertResult, err))
+		insertedIDs := insertResult.GetIDs()
+		totalInsertedIDs.IdField.(*schemapb.IDs_IntId).IntId.Data = append(
+			totalInsertedIDs.IdField.(*schemapb.IDs_IntId).IntId.Data, insertedIDs.IdField.(*schemapb.IDs_IntId).IntId.Data...)
+
+		// delete
+		beginIndex := 0
+		for j := 0; j < delBatch; j++ {
+			if delRow == 0 {
+				continue
+			}
+			delCnt := delRow / delBatch
+			idBegin := insertedIDs.GetIntId().GetData()[beginIndex]
+			idEnd := insertedIDs.GetIntId().GetData()[beginIndex+delCnt-1]
+			deleteResult, err := c.Proxy.Delete(ctx, &milvuspb.DeleteRequest{
+				CollectionName: collectionName,
+				Expr:           fmt.Sprintf("%d <= %s <= %d", idBegin, integration.Int64Field, idEnd),
+			})
+			s.NoError(merr.CheckRPCCall(deleteResult, err))
+			beginIndex += delCnt
+		}
+
+		// flush
+		flushResp, err := c.Proxy.Flush(ctx, &milvuspb.FlushRequest{
+			CollectionNames: []string{collectionName},
+		})
+		s.NoError(merr.CheckRPCCall(flushResp, err))
+		segmentIDs, has := flushResp.GetCollSegIDs()[collectionName]
+		ids := segmentIDs.GetData()
+		s.Require().NotEmpty(segmentIDs)
+		s.Require().True(has)
+		flushTs, has := flushResp.GetCollFlushTs()[collectionName]
+		s.True(has)
+		s.WaitForFlush(ctx, ids, flushTs, "", collectionName)
+		segments, err := c.MetaWatcher.ShowSegments()
+		s.NoError(err)
+		s.NotEmpty(segments)
+		for _, segment := range segments {
+			log.Info("ShowSegments result", zap.String("segment", segment.String()))
+		}
+	}
 
 	// check l0 segments
-	segments, err = c.MetaWatcher.ShowSegments()
-	s.NoError(err)
-	s.NotEmpty(segments)
-	l0Segments := lo.Filter(segments, func(segment *datapb.SegmentInfo, _ int) bool {
-		return segment.GetLevel() == datapb.SegmentLevel_L0
-	})
-	s.Equal(delBatch, len(l0Segments))
+	if totalDeleteRowNum > 0 {
+		segments, err := c.MetaWatcher.ShowSegments()
+		s.NoError(err)
+		s.NotEmpty(segments)
+		l0Segments := lo.Filter(segments, func(segment *datapb.SegmentInfo, _ int) bool {
+			return segment.GetLevel() == datapb.SegmentLevel_L0
+		})
+		s.True(len(l0Segments) > 0)
+	}
 
 	// search
 	expr := fmt.Sprintf("%s > 0", integration.Int64Field)
@@ -162,7 +184,11 @@ func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int)
 
 	err = merr.CheckRPCCall(searchResult, err)
 	s.NoError(err)
-	s.Equal(nq*topk, len(searchResult.GetResults().GetScores()))
+	expectResult := nq * topk
+	if expectResult > totalInsertRowNum-totalDeleteRowNum {
+		expectResult = totalInsertRowNum - totalDeleteRowNum
+	}
+	s.Equal(expectResult, len(searchResult.GetResults().GetScores()))
 
 	// query
 	expr = fmt.Sprintf("%s >= 0", integration.Int64Field)
@@ -174,10 +200,10 @@ func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int)
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)
 	count := int(queryResult.GetFieldsData()[0].GetScalars().GetLongData().GetData()[0])
-	s.Equal(rowNum-delNum, count)
+	s.Equal(totalInsertRowNum-totalDeleteRowNum, count)
 
 	// query 2
-	expr = fmt.Sprintf("%s < %d", integration.Int64Field, insertedIDs.GetIntId().GetData()[10])
+	expr = fmt.Sprintf("%s < %d", integration.Int64Field, totalInsertedIDs.GetIntId().GetData()[10])
 	queryResult, err = c.Proxy.Query(ctx, &milvuspb.QueryRequest{
 		CollectionName: collectionName,
 		Expr:           expr,
@@ -186,28 +212,34 @@ func (s *BulkInsertSuite) PrepareCollectionA(dim, rowNum, delNum, delBatch int)
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)
 	count = len(queryResult.GetFieldsData()[0].GetScalars().GetLongData().GetData())
-	s.Equal(0, count)
+	expectCount := 10
+	if dmlGroup.deleteRowNums[0] >= 10 {
+		expectCount = 0
+	}
+	s.Equal(expectCount, count)
 
 	// get collectionID and partitionID
 	collectionID := showCollectionsResp.GetCollectionIds()[0]
 	partitionID := showPartitionsResp.GetPartitionIDs()[0]
 
-	return collectionID, partitionID, insertedIDs
+	return collectionID, partitionID, totalInsertedIDs
 }
 
-func (s *BulkInsertSuite) TestBinlogImport() {
-	const (
-		dim      = 128
-		rowNum   = 50000
-		delNum   = 30000
-		delBatch = 10
-	)
+func (s *BulkInsertSuite) runBinlogTest(dmlGroup *DMLGroup) {
+	const dim = 128
 
-	collectionID, partitionID, insertedIDs := s.PrepareCollectionA(dim, rowNum, delNum, delBatch)
+	collectionID, partitionID, insertedIDs := s.PrepareCollectionA(dim, dmlGroup)
 
 	c := s.Cluster
 	ctx := c.GetContext()
 
+	totalInsertRowNum := lo.SumBy(dmlGroup.insertRowNums, func(num int) int {
+		return num
+	})
+	totalDeleteRowNum := lo.SumBy(dmlGroup.deleteRowNums, func(num int) int {
+		return num
+	})
+
 	collectionName := "TestBinlogImport_B_" + funcutil.GenRandomStr()
 
 	schema := integration.ConstructSchema(collectionName, dim, true)
@@ -246,16 +278,12 @@ func (s *BulkInsertSuite) TestBinlogImport() {
 	s.NoError(merr.CheckRPCCall(flushedSegmentsResp, err))
 	flushedSegments := flushedSegmentsResp.GetSegments()
 	log.Info("flushed segments", zap.Int64s("segments", flushedSegments))
-	segmentBinlogPrefixes := make([]string, 0)
-	for _, segmentID := range flushedSegments {
-		segmentBinlogPrefixes = append(segmentBinlogPrefixes,
-			fmt.Sprintf("/tmp/%s/insert_log/%d/%d/%d", paramtable.Get().EtcdCfg.RootPath.GetValue(), collectionID, partitionID, segmentID))
-	}
 	// binlog import
-	files := []*internalpb.ImportFile{
-		{
-			Paths: segmentBinlogPrefixes,
-		},
+	files := make([]*internalpb.ImportFile, 0)
+	for _, segmentID := range flushedSegments {
+		files = append(files, &internalpb.ImportFile{Paths: []string{fmt.Sprintf("/tmp/%s/insert_log/%d/%d/%d",
+			paramtable.Get().EtcdCfg.RootPath.GetValue(), collectionID, partitionID, segmentID)}})
 	}
 	importResp, err := c.Proxy.ImportV2(ctx, &internalpb.ImportRequest{
 		CollectionName: collectionName,
@@ -293,45 +321,48 @@ func (s *BulkInsertSuite) TestBinlogImport() {
 	s.NoError(CheckLogID(segment.GetStatslogs()))
 
 	// l0 import
-	files = []*internalpb.ImportFile{
-		{
-			Paths: []string{
-				fmt.Sprintf("/tmp/%s/delta_log/%d/%d/", paramtable.Get().EtcdCfg.RootPath.GetValue(), collectionID, common.AllPartitionsID),
-			},
-		},
-	}
-	importResp, err = c.Proxy.ImportV2(ctx, &internalpb.ImportRequest{
-		CollectionName: collectionName,
-		Files:          files,
-		Options: []*commonpb.KeyValuePair{
-			{Key: "l0_import", Value: "true"},
-		},
-	})
-	s.NoError(merr.CheckRPCCall(importResp, err))
-	log.Info("Import result", zap.Any("importResp", importResp))
-
-	jobID = importResp.GetJobID()
-	err = WaitForImportDone(ctx, c, jobID)
-	s.NoError(err)
-
-	segments, err = c.MetaWatcher.ShowSegments()
-	s.NoError(err)
-	s.NotEmpty(segments)
-	segments = lo.Filter(segments, func(segment *datapb.SegmentInfo, _ int) bool {
-		return segment.GetCollectionID() == newCollectionID
-	})
-	log.Info("Show segments", zap.Any("segments", segments))
-	l0Segments := lo.Filter(segments, func(segment *datapb.SegmentInfo, _ int) bool {
-		return segment.GetCollectionID() == newCollectionID && segment.GetLevel() == datapb.SegmentLevel_L0
-	})
-	s.Equal(1, len(l0Segments))
-	segment = l0Segments[0]
-	s.Equal(commonpb.SegmentState_Flushed, segment.GetState())
-	s.Equal(common.AllPartitionsID, segment.GetPartitionID())
-	s.True(len(segment.GetBinlogs()) == 0)
-	s.True(len(segment.GetDeltalogs()) > 0)
-	s.NoError(CheckLogID(segment.GetDeltalogs()))
-	s.True(len(segment.GetStatslogs()) == 0)
+	if totalDeleteRowNum > 0 {
+		files = []*internalpb.ImportFile{
+			{
+				Paths: []string{
+					fmt.Sprintf("/tmp/%s/delta_log/%d/%d/",
+						paramtable.Get().EtcdCfg.RootPath.GetValue(), collectionID, common.AllPartitionsID),
+				},
+			},
+		}
+		importResp, err = c.Proxy.ImportV2(ctx, &internalpb.ImportRequest{
+			CollectionName: collectionName,
+			Files:          files,
+			Options: []*commonpb.KeyValuePair{
+				{Key: "l0_import", Value: "true"},
+			},
+		})
+		s.NoError(merr.CheckRPCCall(importResp, err))
+		log.Info("Import result", zap.Any("importResp", importResp))
+
+		jobID = importResp.GetJobID()
+		err = WaitForImportDone(ctx, c, jobID)
+		s.NoError(err)
+
+		segments, err = c.MetaWatcher.ShowSegments()
+		s.NoError(err)
+		s.NotEmpty(segments)
+		segments = lo.Filter(segments, func(segment *datapb.SegmentInfo, _ int) bool {
+			return segment.GetCollectionID() == newCollectionID
+		})
+		log.Info("Show segments", zap.Any("segments", segments))
+		l0Segments := lo.Filter(segments, func(segment *datapb.SegmentInfo, _ int) bool {
+			return segment.GetCollectionID() == newCollectionID && segment.GetLevel() == datapb.SegmentLevel_L0
+		})
+		s.Equal(1, len(l0Segments))
+		segment = l0Segments[0]
+		s.Equal(commonpb.SegmentState_Flushed, segment.GetState())
+		s.Equal(common.AllPartitionsID, segment.GetPartitionID())
+		s.True(len(segment.GetBinlogs()) == 0)
+		s.True(len(segment.GetDeltalogs()) > 0)
+		s.NoError(CheckLogID(segment.GetDeltalogs()))
+		s.True(len(segment.GetStatslogs()) == 0)
+	}
 
 	// load
 	loadStatus, err := c.Proxy.LoadCollection(ctx, &milvuspb.LoadCollectionRequest{
@@ -349,12 +380,17 @@ func (s *BulkInsertSuite) TestBinlogImport() {
 	params := integration.GetSearchParams(integration.IndexFaissIvfFlat, metric.L2)
 	searchReq := integration.ConstructSearchRequest("", collectionName, expr,
 		integration.FloatVecField, schemapb.DataType_FloatVector, nil, metric.L2, params, nq, dim, topk, roundDecimal)
+	searchReq.ConsistencyLevel = commonpb.ConsistencyLevel_Eventually
 
 	searchResult, err := c.Proxy.Search(ctx, searchReq)
 
 	err = merr.CheckRPCCall(searchResult, err)
 	s.NoError(err)
-	s.Equal(nq*topk, len(searchResult.GetResults().GetScores()))
+	expectResult := nq * topk
+	if expectResult > totalInsertRowNum-totalDeleteRowNum {
+		expectResult = totalInsertRowNum - totalDeleteRowNum
+	}
+	s.Equal(expectResult, len(searchResult.GetResults().GetScores()))
 	// check ids from collectionA, because during binlog import, even if the primary key's autoID is set to true,
 	// the primary key from the binlog should be used instead of being reassigned.
 	insertedIDsMap := lo.SliceToMap(insertedIDs.GetIntId().GetData(), func(id int64) (int64, struct{}) {
@@ -368,24 +404,104 @@ func (s *BulkInsertSuite) TestBinlogImport() {
 	// query
 	expr = fmt.Sprintf("%s >= 0", integration.Int64Field)
 	queryResult, err := c.Proxy.Query(ctx, &milvuspb.QueryRequest{
 		CollectionName: collectionName,
 		Expr:           expr,
 		OutputFields:   []string{"count(*)"},
+		ConsistencyLevel: commonpb.ConsistencyLevel_Eventually,
 	})
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)
 	count := int(queryResult.GetFieldsData()[0].GetScalars().GetLongData().GetData()[0])
-	s.Equal(rowNum-delNum, count)
+	s.Equal(totalInsertRowNum-totalDeleteRowNum, count)
 
 	// query 2
 	expr = fmt.Sprintf("%s < %d", integration.Int64Field, insertedIDs.GetIntId().GetData()[10])
 	queryResult, err = c.Proxy.Query(ctx, &milvuspb.QueryRequest{
 		CollectionName: collectionName,
 		Expr:           expr,
 		OutputFields:   []string{},
+		ConsistencyLevel: commonpb.ConsistencyLevel_Eventually,
 	})
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)
 	count = len(queryResult.GetFieldsData()[0].GetScalars().GetLongData().GetData())
-	s.Equal(0, count)
+	expectCount := 10
+	if dmlGroup.deleteRowNums[0] >= 10 {
+		expectCount = 0
+	}
+	s.Equal(expectCount, count)
+}
+
+func (s *BulkInsertSuite) TestInvalidInput() {
+	const dim = 128
+	c := s.Cluster
+	ctx := c.GetContext()
+
+	collectionName := "TestBinlogImport_InvalidInput_" + funcutil.GenRandomStr()
+	schema := integration.ConstructSchema(collectionName, dim, true)
+	marshaledSchema, err := proto.Marshal(schema)
+	s.NoError(err)
+
+	createCollectionStatus, err := c.Proxy.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
+		CollectionName: collectionName,
+		Schema:         marshaledSchema,
+		ShardsNum:      common.DefaultShardsNum,
+	})
+	s.NoError(merr.CheckRPCCall(createCollectionStatus, err))
+
+	describeCollectionResp, err := c.Proxy.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{
+		CollectionName: collectionName,
+	})
+	s.NoError(merr.CheckRPCCall(describeCollectionResp, err))
+
+	// binlog import
+	files := []*internalpb.ImportFile{
+		{
+			Paths: []string{"invalid-path", "invalid-path", "invalid-path"},
+		},
+	}
+	importResp, err := c.Proxy.ImportV2(ctx, &internalpb.ImportRequest{
+		CollectionName: collectionName,
+		PartitionName:  paramtable.Get().CommonCfg.DefaultPartitionName.GetValue(),
+		Files:          files,
+		Options: []*commonpb.KeyValuePair{
+			{Key: "backup", Value: "true"},
+		},
+	})
+	err = merr.CheckRPCCall(importResp, err)
+	s.True(strings.Contains(err.Error(), "too many input paths for binlog import"))
+	s.Error(err)
+	log.Info("Import result", zap.Any("importResp", importResp))
+}
+
+func (s *BulkInsertSuite) TestBinlogImport() {
+	dmlGroup := &DMLGroup{
+		insertRowNums: []int{500, 500, 500},
+		deleteRowNums: []int{300, 300, 300},
+	}
+	s.runBinlogTest(dmlGroup)
+}
+
+func (s *BulkInsertSuite) TestBinlogImport_NoDelete() {
+	dmlGroup := &DMLGroup{
+		insertRowNums: []int{500, 500, 500},
+		deleteRowNums: []int{0, 0, 0},
+	}
+	s.runBinlogTest(dmlGroup)
+}
+
+func (s *BulkInsertSuite) TestBinlogImport_Partial_0_Rows_Segment() {
+	dmlGroup := &DMLGroup{
+		insertRowNums: []int{500, 500, 500},
+		deleteRowNums: []int{500, 300, 0},
+	}
+	s.runBinlogTest(dmlGroup)
+}
+
+func (s *BulkInsertSuite) TestBinlogImport_All_0_Rows_Segment() {
+	dmlGroup := &DMLGroup{
+		insertRowNums: []int{500, 500, 500},
+		deleteRowNums: []int{500, 500, 500},
+	}
+	s.runBinlogTest(dmlGroup)
 }
@@ -43,7 +43,7 @@ import (
 
 func (s *BulkInsertSuite) testImportDynamicField() {
 	const (
-		rowCount = 10000
+		rowCount = 100
 	)
 
 	c := s.Cluster
@@ -191,6 +191,7 @@ func (s *BulkInsertSuite) testImportDynamicField() {
 	params := integration.GetSearchParams(integration.IndexFaissIvfFlat, metric.L2)
 	searchReq := integration.ConstructSearchRequest("", collectionName, expr,
 		integration.FloatVecField, schemapb.DataType_FloatVector, nil, metric.L2, params, nq, dim, topk, roundDecimal)
+	searchReq.ConsistencyLevel = commonpb.ConsistencyLevel_Eventually
 
 	searchResult, err := c.Proxy.Search(ctx, searchReq)
 
@@ -49,7 +49,6 @@ type BulkInsertSuite struct {
 	failedReason string
 
 	pkType schemapb.DataType
-	autoID bool
 	fileType importutilv2.FileType
 
 	vecType schemapb.DataType
@@ -63,7 +62,6 @@ func (s *BulkInsertSuite) SetupTest() {
 	s.failed = false
 	s.fileType = importutilv2.Parquet
 	s.pkType = schemapb.DataType_Int64
-	s.autoID = false
 
 	s.vecType = schemapb.DataType_FloatVector
 	s.indexType = "HNSW"
@@ -82,14 +80,14 @@ func (s *BulkInsertSuite) run() {
 	collectionName := "TestBulkInsert" + funcutil.GenRandomStr()
 
 	var schema *schemapb.CollectionSchema
-	fieldSchema1 := &schemapb.FieldSchema{FieldID: 100, Name: "id", DataType: s.pkType, TypeParams: []*commonpb.KeyValuePair{{Key: common.MaxLengthKey, Value: "128"}}, IsPrimaryKey: true, AutoID: s.autoID}
+	fieldSchema1 := &schemapb.FieldSchema{FieldID: 100, Name: "id", DataType: s.pkType, TypeParams: []*commonpb.KeyValuePair{{Key: common.MaxLengthKey, Value: "128"}}, IsPrimaryKey: true, AutoID: false}
 	fieldSchema2 := &schemapb.FieldSchema{FieldID: 101, Name: "image_path", DataType: schemapb.DataType_VarChar, TypeParams: []*commonpb.KeyValuePair{{Key: common.MaxLengthKey, Value: "65535"}}}
 	fieldSchema3 := &schemapb.FieldSchema{FieldID: 102, Name: "embeddings", DataType: s.vecType, TypeParams: []*commonpb.KeyValuePair{{Key: common.DimKey, Value: "128"}}}
 	fieldSchema4 := &schemapb.FieldSchema{FieldID: 103, Name: "embeddings", DataType: s.vecType, TypeParams: []*commonpb.KeyValuePair{}}
 	if s.vecType != schemapb.DataType_SparseFloatVector {
-		schema = integration.ConstructSchema(collectionName, dim, s.autoID, fieldSchema1, fieldSchema2, fieldSchema3)
+		schema = integration.ConstructSchema(collectionName, dim, false, fieldSchema1, fieldSchema2, fieldSchema3)
 	} else {
-		schema = integration.ConstructSchema(collectionName, dim, s.autoID, fieldSchema1, fieldSchema2, fieldSchema4)
+		schema = integration.ConstructSchema(collectionName, dim, false, fieldSchema1, fieldSchema2, fieldSchema4)
 	}
 
 	marshaledSchema, err := proto.Marshal(schema)
@ -211,6 +209,7 @@ func (s *BulkInsertSuite) run() {
|
|||||||
params := integration.GetSearchParams(s.indexType, s.metricType)
|
params := integration.GetSearchParams(s.indexType, s.metricType)
|
||||||
searchReq := integration.ConstructSearchRequest("", collectionName, expr,
|
searchReq := integration.ConstructSearchRequest("", collectionName, expr,
|
||||||
"embeddings", s.vecType, nil, s.metricType, params, nq, dim, topk, roundDecimal)
|
"embeddings", s.vecType, nil, s.metricType, params, nq, dim, topk, roundDecimal)
|
||||||
|
searchReq.ConsistencyLevel = commonpb.ConsistencyLevel_Eventually
|
||||||
|
|
||||||
searchResult, err := c.Proxy.Search(ctx, searchReq)
|
searchResult, err := c.Proxy.Search(ctx, searchReq)
|
||||||
s.NoError(err)
|
s.NoError(err)
|
||||||
@ -259,16 +258,6 @@ func (s *BulkInsertSuite) TestMultiFileTypes() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *BulkInsertSuite) TestAutoID() {
|
|
||||||
s.pkType = schemapb.DataType_Int64
|
|
||||||
s.autoID = true
|
|
||||||
s.run()
|
|
||||||
|
|
||||||
s.pkType = schemapb.DataType_VarChar
|
|
||||||
s.autoID = true
|
|
||||||
s.run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *BulkInsertSuite) TestPK() {
|
func (s *BulkInsertSuite) TestPK() {
|
||||||
s.pkType = schemapb.DataType_Int64
|
s.pkType = schemapb.DataType_Int64
|
||||||
s.run()
|
s.run()
|
||||||
|
|||||||
@ -43,7 +43,7 @@ import (
 
 func (s *BulkInsertSuite) testMultipleVectorFields() {
 	const (
-		rowCount = 10000
+		rowCount = 100
 		dim1     = 64
 		dim2     = 32
 	)
@ -215,6 +215,7 @@ func (s *BulkInsertSuite) testMultipleVectorFields() {
 	params := integration.GetSearchParams(integration.IndexFaissIvfFlat, metric.L2)
 	searchReq := integration.ConstructSearchRequest("", collectionName, expr,
 		integration.FloatVecField, schemapb.DataType_FloatVector, nil, metric.L2, params, nq, dim1, topk, roundDecimal)
+	searchReq.ConsistencyLevel = commonpb.ConsistencyLevel_Eventually
 
 	searchResult, err := c.Proxy.Search(ctx, searchReq)
 
@ -225,6 +226,7 @@ func (s *BulkInsertSuite) testMultipleVectorFields() {
 	// search vec 2
 	searchReq = integration.ConstructSearchRequest("", collectionName, expr,
 		integration.BFloat16VecField, schemapb.DataType_BFloat16Vector, nil, metric.L2, params, nq, dim2, topk, roundDecimal)
+	searchReq.ConsistencyLevel = commonpb.ConsistencyLevel_Eventually
 
 	searchResult, err = c.Proxy.Search(ctx, searchReq)
 
@ -42,7 +42,7 @@ import (
 
 func (s *BulkInsertSuite) TestImportWithPartitionKey() {
 	const (
-		rowCount = 10000
+		rowCount = 100
 	)
 
 	c := s.Cluster
@ -338,9 +338,10 @@ func (s *BulkInsertSuite) TestImportWithAFewRows() {
 	str := strings.Join(strs, `,`)
 	expr := fmt.Sprintf("%s in [%v]", integration.VarCharField, str)
 	queryResult, err := c.Proxy.Query(ctx, &milvuspb.QueryRequest{
 		CollectionName: collectionName,
 		Expr:           expr,
 		OutputFields:   []string{integration.VarCharField},
+		ConsistencyLevel: commonpb.ConsistencyLevel_Eventually,
 	})
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)
@ -355,9 +356,10 @@ func (s *BulkInsertSuite) TestImportWithAFewRows() {
 	// query partition key, CmpOp 1
 	expr = fmt.Sprintf("%s >= 0", integration.Int64Field)
 	queryResult, err = c.Proxy.Query(ctx, &milvuspb.QueryRequest{
 		CollectionName: collectionName,
 		Expr:           expr,
 		OutputFields:   []string{integration.VarCharField},
+		ConsistencyLevel: commonpb.ConsistencyLevel_Eventually,
 	})
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)
@ -373,9 +375,10 @@ func (s *BulkInsertSuite) TestImportWithAFewRows() {
 	target := partitionKeyData[rand.Intn(rowCount)]
 	expr = fmt.Sprintf("%s == \"%s\"", integration.VarCharField, target)
 	queryResult, err = c.Proxy.Query(ctx, &milvuspb.QueryRequest{
 		CollectionName: collectionName,
 		Expr:           expr,
 		OutputFields:   []string{integration.VarCharField},
+		ConsistencyLevel: commonpb.ConsistencyLevel_Eventually,
 	})
 	err = merr.CheckRPCCall(queryResult, err)
 	s.NoError(err)