test: add cases for gosdk v2 upsert (#34400)

- test: add cases for gosdk v2 upsert
- test: update clientv2 alias references to client

issue: #33419

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>
ThreadDao committed 2024-07-04 14:26:09 +08:00 (via GitHub)
parent 10b3ce24ba
commit e4cece8de8
11 changed files with 791 additions and 329 deletions
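The upsert cases themselves live in files further down this diff (truncated here); for orientation, a minimal sketch of the pattern they follow, assuming the column-based option constructor doubles for Upsert as it does for Insert in this client/v2 API, and assuming the usual hp helper import path — not a verbatim excerpt of the PR:

package testcases

import (
	"testing"
	"time"

	"github.com/milvus-io/milvus/client/v2" // package name is "client"
	"github.com/milvus-io/milvus/client/v2/entity"
	"github.com/milvus-io/milvus/tests/go_client/common"
	hp "github.com/milvus-io/milvus/tests/go_client/testcases/helper" // assumed path
)

// Sketch: create an int64-pk + float-vector collection, then upsert a batch
// of columns and expect success. Index/load steps are omitted for brevity.
func TestUpsertSketch(t *testing.T) {
	ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
	mc := createDefaultMilvusClient(ctx, t)
	_, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())

	pkColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeInt64, *hp.TNewDataOption())
	vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, *hp.TNewDataOption())
	_, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn))
	common.CheckErr(t, err, true)
}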

View File

@@ -5,7 +5,7 @@ go 1.21
toolchain go1.21.10
require (
- github.com/milvus-io/milvus/client/v2 v2.0.0-20240625063004-b12c34a8baf2
+ github.com/milvus-io/milvus/client/v2 v2.0.0-20240703023208-fb61344dc9b5
github.com/milvus-io/milvus/pkg v0.0.2-0.20240317152703-17b4938985f3
github.com/quasilyte/go-ruleguard/dsl v0.3.22
github.com/stretchr/testify v1.9.0
@@ -14,7 +14,7 @@ require (
google.golang.org/grpc v1.64.0
)
- // replace github.com/milvus-io/milvus/client/v2 v2.0.0-20240625063004-b12c34a8baf2 => ../../../milvus/client
+ // replace github.com/milvus-io/milvus/client/v2 v2.0.0-20240703023208-fb61344dc9b5 => ../../../milvus/client
require (
github.com/beorn7/perks v1.0.1 // indirect
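The commented-out replace directive is an escape hatch for local development: uncommented, it points the pinned client/v2 pseudo-version at a sibling checkout instead of the module proxy. A sketch of its uncommented form, using the relative path from the comment above (presumably this is the go_client test module's go.mod; adjust the path to your layout):

replace github.com/milvus-io/milvus/client/v2 v2.0.0-20240703023208-fb61344dc9b5 => ../../../milvus/client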

View File

@@ -403,8 +403,8 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/milvus-io/milvus-proto/go-api/v2 v2.4.3 h1:KUSaWVePVlHMIluAXf2qmNffI1CMlGFLLiP+4iy9014=
github.com/milvus-io/milvus-proto/go-api/v2 v2.4.3/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek=
- github.com/milvus-io/milvus/client/v2 v2.0.0-20240625063004-b12c34a8baf2 h1:Eb3E5TQwNAImS2M1yRNc1/IzlfD8iQJ9HZt8Lf41xVc=
- github.com/milvus-io/milvus/client/v2 v2.0.0-20240625063004-b12c34a8baf2/go.mod h1:thfuEkUztRRmQ+qu4hCoO/6uxDJoUVNNx4vHqx9yh5I=
+ github.com/milvus-io/milvus/client/v2 v2.0.0-20240703023208-fb61344dc9b5 h1:jsMriUhlv82KS34VV6y/SDpeL+MEWcO6nR4Ur1diEf8=
+ github.com/milvus-io/milvus/client/v2 v2.0.0-20240703023208-fb61344dc9b5/go.mod h1:13uL9ukc9KRK5ZtWqWwaORWlRccZLIysZzT6KUlOx+A=
github.com/milvus-io/milvus/pkg v0.0.2-0.20240317152703-17b4938985f3 h1:ZBpRWhBa7FTFxW4YYVv9AUESoW1Xyb3KNXTzTqfkZmw=
github.com/milvus-io/milvus/pkg v0.0.2-0.20240317152703-17b4938985f3/go.mod h1:jQ2BUZny1COsgv1Qbcv8dmbppW+V9J/c4YQZNb3EOm8=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
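A pseudo-version bump always touches go.sum in pairs: the `h1:` hash covers the whole module tree and the `/go.mod` hash covers its go.mod file alone. One way to regenerate both after moving the dependency to a new commit (the commit prefix shown is the one in this diff):

go get github.com/milvus-io/milvus/client/v2@fb61344dc9b5
go mod tidy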

View File

@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/zap"
- clientv2 "github.com/milvus-io/milvus/client/v2"
+ "github.com/milvus-io/milvus/client/v2"
"github.com/milvus-io/milvus/client/v2/entity"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/tests/go_client/common"
@@ -25,16 +25,16 @@ func TestCreateCollection(t *testing.T) {
for _, collectionFieldsType := range []hp.CollectionFieldsType{hp.Int64Vec, hp.VarcharBinary, hp.Int64VarcharSparseVec, hp.AllFields} {
fields := hp.FieldsFact.GenFieldsForCollection(collectionFieldsType, hp.TNewFieldsOption())
schema := hp.GenSchema(hp.TNewSchemaOption().TWithFields(fields))
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(schema.CollectionName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(schema.CollectionName, schema))
common.CheckErr(t, err, true)
// has collections and verify
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
// list collections and verify
- collections, err := mc.ListCollections(ctx, clientv2.NewListCollectionOption())
+ collections, err := mc.ListCollections(ctx, client.NewListCollectionOption())
common.CheckErr(t, err, true)
require.Contains(t, collections, schema.CollectionName)
}
@@ -51,18 +51,18 @@ func TestCreateAutoIdCollectionField(t *testing.T) {
// pk field with name
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
require.True(t, coll.Schema.AutoID)
require.True(t, coll.Schema.Fields[0].AutoID)
// insert
vecColumn := hp.GenColumnData(common.DefaultNb, vecField.DataType, *hp.TNewDataOption())
- _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, vecColumn))
+ _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, vecColumn))
common.CheckErr(t, err, true)
}
}
@@ -78,11 +78,11 @@ func TestCreateCollectionShards(t *testing.T) {
// pk field with name
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema).WithShardNum(shard))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema).WithShardNum(shard))
common.CheckErr(t, err, true)
// verify field name
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
if shard < 1 {
shard = 1
@@ -104,18 +104,18 @@ func TestCreateAutoIdCollectionSchema(t *testing.T) {
// pk field with name
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithAutoID(true)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
log.Info("schema autoID", zap.Bool("schemaAuto", coll.Schema.AutoID))
log.Info("field autoID", zap.Bool("fieldAuto", coll.Schema.Fields[0].AutoID))
// insert
vecColumn := hp.GenColumnData(common.DefaultNb, vecField.DataType, *hp.TNewDataOption())
- _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, vecColumn))
+ _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, vecColumn))
common.CheckErr(t, err, false, "field pk not passed")
}
}
@@ -133,18 +133,18 @@ func TestCreateAutoIdCollection(t *testing.T) {
// pk field with name
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema).WithAutoID(true))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema).WithAutoID(true))
common.CheckErr(t, err, true)
// verify field name
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
log.Info("schema autoID", zap.Bool("schemaAuto", coll.Schema.AutoID))
log.Info("field autoID", zap.Bool("fieldAuto", coll.Schema.Fields[0].AutoID))
// insert
vecColumn := hp.GenColumnData(common.DefaultNb, vecField.DataType, *hp.TNewDataOption())
- _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, vecColumn))
+ _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, vecColumn))
common.CheckErr(t, err, false, "field pk not passed")
}
}
@@ -160,11 +160,11 @@ func TestCreateJsonCollection(t *testing.T) {
// pk field with name
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithField(jsonField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
}
@@ -188,11 +188,11 @@ func TestCreateArrayCollections(t *testing.T) {
}
// pk field with name
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
}
@@ -211,10 +211,10 @@ func TestCreateCollectionPartitionKey(t *testing.T) {
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField).WithField(partitionKeyField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
for _, field := range coll.Schema.Fields {
@@ -224,7 +224,7 @@ func TestCreateCollectionPartitionKey(t *testing.T) {
}
// verify partitions
- partitions, err := mc.ListPartitions(ctx, clientv2.NewListPartitionOption(collName))
+ partitions, err := mc.ListPartitions(ctx, client.NewListPartitionOption(collName))
require.Len(t, partitions, common.DefaultPartitionNum)
common.CheckErr(t, err, true)
}
@@ -245,11 +245,11 @@ func TestCreateCollectionPartitionKeyNumPartition(t *testing.T) {
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField).WithField(partitionKeyField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify partitions num
- partitions, err := mc.ListPartitions(ctx, clientv2.NewListPartitionOption(collName))
+ partitions, err := mc.ListPartitions(ctx, client.NewListPartitionOption(collName))
require.Len(t, partitions, int(numPartition))
common.CheckErr(t, err, true)
}
@@ -265,15 +265,15 @@ func TestCreateCollectionDynamicSchema(t *testing.T) {
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithDynamicFieldEnabled(true)
// pk field with name
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(schema.CollectionName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, coll.Schema.EnableDynamicField)
@@ -282,7 +282,7 @@ func TestCreateCollectionDynamicSchema(t *testing.T) {
varcharColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeVarChar, columnOption)
vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, columnOption)
dynamicData := hp.GenDynamicColumnData(0, common.DefaultNb)
- _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, varcharColumn, vecColumn).WithColumns(dynamicData...))
+ _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, varcharColumn, vecColumn).WithColumns(dynamicData...))
common.CheckErr(t, err, true)
}
@@ -297,15 +297,15 @@ func TestCreateCollectionDynamic(t *testing.T) {
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField)
// pk field with name
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema).WithDynamicSchema(true))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema).WithDynamicSchema(true))
common.CheckErr(t, err, true)
// verify field name
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(schema.CollectionName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(schema.CollectionName))
log.Info("collection dynamic", zap.Bool("collectionSchema", coll.Schema.EnableDynamicField))
common.CheckErr(t, err, true)
// require.True(t, coll.Schema.Fields[0].IsDynamic)
@@ -315,7 +315,7 @@ func TestCreateCollectionDynamic(t *testing.T) {
varcharColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeVarChar, columnOption)
vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, columnOption)
dynamicData := hp.GenDynamicColumnData(0, common.DefaultNb)
- _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, varcharColumn, vecColumn).WithColumns(dynamicData...))
+ _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, varcharColumn, vecColumn).WithColumns(dynamicData...))
common.CheckErr(t, err, false, "field dynamicNumber does not exist")
}
@@ -333,11 +333,11 @@ func TestCreateCollectionAllFields(t *testing.T) {
}
// pk field with name
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
}
@@ -352,11 +352,11 @@ func TestCreateCollectionSparseVector(t *testing.T) {
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(sparseVecField)
// pk field with name
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema).WithDynamicSchema(true))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema).WithDynamicSchema(true))
common.CheckErr(t, err, true)
// verify field name
- has, err := mc.HasCollection(ctx, clientv2.NewHasCollectionOption(schema.CollectionName))
+ has, err := mc.HasCollection(ctx, client.NewHasCollectionOption(schema.CollectionName))
common.CheckErr(t, err, true)
require.True(t, has)
}
@@ -375,11 +375,11 @@ func TestCreateCollectionWithValidFieldName(t *testing.T) {
pkField := entity.NewField().WithName(name).WithDataType(entity.FieldTypeInt64).WithIsPrimaryKey(true)
vecField := entity.NewField().WithName(common.DefaultFloatVecFieldName).WithDataType(entity.FieldTypeFloatVector).WithDim(common.DefaultDim)
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// verify field name
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
require.Equal(t, name, coll.Schema.Fields[0].Name)
}
@@ -404,14 +404,14 @@ func TestCreateCollectionWithValidName(t *testing.T) {
for _, name := range common.GenValidNames() {
schema := genDefaultSchema().WithName(name)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(name, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(name, schema))
common.CheckErr(t, err, true)
- collections, err := mc.ListCollections(ctx, clientv2.NewListCollectionOption())
+ collections, err := mc.ListCollections(ctx, client.NewListCollectionOption())
common.CheckErr(t, err, true)
require.Contains(t, collections, name)
- err = mc.DropCollection(ctx, clientv2.NewDropCollectionOption(name))
+ err = mc.DropCollection(ctx, client.NewDropCollectionOption(name))
common.CheckErr(t, err, true)
}
}
@@ -429,7 +429,7 @@ func TestCreateCollectionWithInvalidFieldName(t *testing.T) {
pkField := entity.NewField().WithName(invalidName).WithDataType(entity.FieldTypeInt64).WithIsPrimaryKey(true)
vecField := entity.NewField().WithName("vec").WithDataType(entity.FieldTypeFloatVector).WithDim(128)
schema := entity.NewSchema().WithName("aaa").WithField(pkField).WithField(vecField)
- collOpt := clientv2.NewCreateCollectionOption("aaa", schema)
+ collOpt := client.NewCreateCollectionOption("aaa", schema)
err := mc.CreateCollection(ctx, collOpt)
common.CheckErr(t, err, false, "field name should not be empty",
@@ -450,7 +450,7 @@ func TestCreateCollectionWithInvalidCollectionName(t *testing.T) {
// create collection and schema no name
schema := genDefaultSchema()
- err2 := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err2 := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err2, false, "collection name should not be empty")
// create collection with invalid schema name
@@ -459,7 +459,7 @@ func TestCreateCollectionWithInvalidCollectionName(t *testing.T) {
// schema has invalid name
schema.WithName(invalidName)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "collection name should not be empty",
"the first character of a collection name must be an underscore or letter",
"collection name can only contain numbers, letters and underscores",
@@ -467,13 +467,13 @@ func TestCreateCollectionWithInvalidCollectionName(t *testing.T) {
// collection option has invalid name
schema.WithName(collName)
- err2 := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(invalidName, schema))
+ err2 := mc.CreateCollection(ctx, client.NewCreateCollectionOption(invalidName, schema))
common.CheckErr(t, err2, false, "collection name matches schema name")
}
// collection name not equal to schema name
schema.WithName(collName)
- err3 := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(common.GenRandomString("pre", 4), schema))
+ err3 := mc.CreateCollection(ctx, client.NewCreateCollectionOption(common.GenRandomString("pre", 4), schema))
common.CheckErr(t, err3, false, "collection name matches schema name")
}
@@ -507,7 +507,7 @@ func TestCreateCollectionInvalidFields(t *testing.T) {
for _, field := range invalidField.fields {
schema.WithField(field)
}
- collOpt := clientv2.NewCreateCollectionOption(collName, schema)
+ collOpt := client.NewCreateCollectionOption(collName, schema)
err := mc.CreateCollection(ctx, collOpt)
common.CheckErr(t, err, false, invalidField.errMsg)
}
@@ -521,13 +521,13 @@ func TestCreateCollectionInvalidAutoPkField(t *testing.T) {
// create collection with autoID true or not
collName := common.GenRandomString(prefix, 6)
- for _, autoId := range []bool{true, false} {
+ for _, autoId := range [2]bool{true, false} {
vecField := entity.NewField().WithName(common.DefaultFloatVecFieldName).WithDataType(entity.FieldTypeFloatVector).WithDim(common.DefaultDim)
// pk field type: non-int64 and non-varchar
for _, fieldType := range hp.GetInvalidPkFieldType() {
invalidPkField := entity.NewField().WithName("pk").WithDataType(fieldType).WithIsPrimaryKey(true)
schema := entity.NewSchema().WithName(collName).WithField(vecField).WithField(invalidPkField).WithAutoID(autoId)
- errNonInt64Field := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ errNonInt64Field := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, errNonInt64Field, false, "the data type of primary key should be Int64 or VarChar")
}
}
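One non-mechanical change hides in the hunk above: the autoId loop now ranges over a fixed-size array ([2]bool) rather than a slice ([]bool). For a range loop the two are interchangeable; the array form simply drops the slice wrapper (possibly a linter-driven cleanup, given the go-ruleguard dependency in go.mod). A side-by-side sketch:

// Both loops visit true then false; only the literal's type differs.
for _, autoId := range []bool{true, false} { // slice literal
	_ = autoId
}
for _, autoId := range [2]bool{true, false} { // array literal, no slice header
	_ = autoId
}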
@@ -546,12 +546,12 @@ func TestCreateCollectionDuplicateField(t *testing.T) {
// two vector fields have same name
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithField(vecField)
- errDupField := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ errDupField := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, errDupField, false, "duplicated field name")
// two named "id" fields, one is pk field and other is scalar field
schema2 := entity.NewSchema().WithName(collName).WithField(pkField).WithField(pkField2).WithField(vecField).WithAutoID(true)
- errDupField2 := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema2))
+ errDupField2 := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema2))
common.CheckErr(t, errDupField2, false, "duplicated field name")
}
@@ -564,7 +564,6 @@ func TestCreateCollectionInvalidPartitionKeyType(t *testing.T) {
vecField := entity.NewField().WithName(common.DefaultFloatVecFieldName).WithDataType(entity.FieldTypeFloatVector).WithDim(common.DefaultDim)
collName := common.GenRandomString(prefix, 6)
- t.Parallel()
for _, fieldType := range hp.GetInvalidPartitionKeyFieldType() {
log.Debug("TestCreateCollectionInvalidPartitionKeyType", zap.Any("partitionKeyFieldType", fieldType))
partitionKeyField := entity.NewField().WithName("parKey").WithDataType(fieldType).WithIsPartitionKey(true)
@@ -572,7 +571,7 @@ func TestCreateCollectionInvalidPartitionKeyType(t *testing.T) {
partitionKeyField.WithElementType(entity.FieldTypeInt64)
}
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField).WithField(partitionKeyField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "the data type of partition key should be Int64 or VarChar")
}
}
@@ -587,7 +586,7 @@ func TestCreateCollectionPartitionKeyPk(t *testing.T) {
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "the partition key field must not be primary field")
}
@@ -604,7 +603,7 @@ func TestCreateCollectionPartitionKeyNum(t *testing.T) {
pkField2 := entity.NewField().WithName("pk_2").WithDataType(entity.FieldTypeVarChar).WithMaxLength(common.TestMaxLen).WithIsPartitionKey(true)
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField).WithField(pkField1).WithField(pkField2)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "there are more than one partition key")
}
@@ -630,7 +629,7 @@ func TestPartitionKeyInvalidNumPartition(t *testing.T) {
}
for _, npStruct := range invalidNumPartitionStruct {
// create collection with num partitions
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, npStruct.errMsg)
}
}
@@ -646,7 +645,7 @@ func TestCreateCollectionMultiAutoId(t *testing.T) {
entity.NewField().WithName("dupInt").WithDataType(entity.FieldTypeInt64).WithIsAutoID(true)).WithField(
entity.NewField().WithName(common.DefaultFloatVecFieldName).WithDataType(entity.FieldTypeFloatVector).WithDim(common.DefaultDim),
).WithName(collName)
- errMultiAuto := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ errMultiAuto := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, errMultiAuto, false, "only one field can speficy AutoID with true")
}
@@ -665,11 +664,11 @@ func TestCreateCollectionInconsistentAutoId(t *testing.T) {
).WithName(collName).WithAutoID(!autoId)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
// describe collection
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
require.EqualValues(t, autoId, coll.Schema.AutoID)
for _, field := range coll.Schema.Fields {
@@ -694,10 +693,10 @@ func TestCreateCollectionDescription(t *testing.T) {
vecField := entity.NewField().WithName(common.DefaultFloatVecFieldName).WithDataType(entity.FieldTypeFloatVector).WithDim(common.DefaultDim)
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithDescription(schemaDesc)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, true)
- coll, err := mc.DescribeCollection(ctx, clientv2.NewDescribeCollectionOption(collName))
+ coll, err := mc.DescribeCollection(ctx, client.NewDescribeCollectionOption(collName))
common.CheckErr(t, err, true)
require.EqualValues(t, schemaDesc, coll.Schema.Description)
for _, field := range coll.Schema.Fields {
@@ -738,7 +737,7 @@ func TestCreateBinaryCollectionInvalidDim(t *testing.T) {
).WithName(collName)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, invalidDim.errMsg)
}
}
@@ -774,7 +773,7 @@ func TestCreateFloatCollectionInvalidDim(t *testing.T) {
).WithName(collName)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, invalidDim.errMsg)
}
}
@@ -789,7 +788,7 @@ func TestCreateVectorWithoutDim(t *testing.T) {
entity.NewField().WithName(common.DefaultInt64FieldName).WithDataType(entity.FieldTypeInt64).WithIsPrimaryKey(true)).WithField(
entity.NewField().WithName("vec").WithDataType(entity.FieldTypeFloatVector),
).WithName(collName)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "dimension is not defined in field type params, check type param `dim` for vector field")
}
@@ -805,7 +804,7 @@ func TestCreateCollectionSparseVectorWithDim(t *testing.T) {
).WithName(collName)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "dim should not be specified for sparse vector field sparse")
}
@@ -821,13 +820,13 @@ func TestCreateArrayFieldInvalidCapacity(t *testing.T) {
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithField(arrayField)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "type param(max_capacity) should be specified for array field")
// invalid Capacity
for _, invalidCapacity := range []int64{-1, 0, common.MaxCapacity + 1} {
arrayField.WithMaxCapacity(invalidCapacity)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "the maximum capacity specified for a Array should be in (0, 4096]")
}
}
@@ -845,13 +844,13 @@ func TestCreateVarcharArrayInvalidLength(t *testing.T) {
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithField(arrayVarcharField)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "type param(max_length) should be specified for varChar field")
// invalid Capacity
for _, invalidLength := range []int64{-1, 0, common.MaxLength + 1} {
arrayVarcharField.WithMaxLength(invalidLength)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "the maximum length specified for a VarChar should be in (0, 65535]")
}
}
@@ -868,13 +867,13 @@ func TestCreateVarcharInvalidLength(t *testing.T) {
schema := entity.NewSchema().WithName(collName).WithField(varcharField).WithField(vecField)
// create collection
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "type param(max_length) should be specified for varChar field")
// invalid Capacity
for _, invalidLength := range []int64{-1, 0, common.MaxLength + 1} {
varcharField.WithMaxLength(invalidLength)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, "the maximum length specified for a VarChar should be in (0, 65535]")
}
}
@@ -890,7 +889,7 @@ func TestCreateArrayNotSupportedFieldType(t *testing.T) {
for _, fieldType := range []entity.FieldType{entity.FieldTypeArray, entity.FieldTypeJSON, entity.FieldTypeBinaryVector, entity.FieldTypeFloatVector} {
field := entity.NewField().WithName("array").WithDataType(entity.FieldTypeArray).WithElementType(fieldType)
schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithField(field)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, fmt.Sprintf("element type %s is not supported", fieldType.Name()))
}
}
@@ -907,7 +906,7 @@ func TestCreateMultiVectorExceed(t *testing.T) {
vecField := entity.NewField().WithName(fmt.Sprintf("vec_%d", i)).WithDataType(entity.FieldTypeFloatVector).WithDim(common.DefaultDim)
schema.WithField(vecField)
}
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
common.CheckErr(t, err, false, fmt.Sprintf("maximum vector field's number should be limited to %d", common.MaxVectorFieldNum))
}
@@ -922,7 +921,7 @@ func TestCreateCollectionInvalidShards(t *testing.T) {
// pk field with name
collName := common.GenRandomString(prefix, 6)
schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField)
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema).WithShardNum(shard))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema).WithShardNum(shard))
common.CheckErr(t, err, false, fmt.Sprintf("maximum shards's number should be limited to %d", common.MaxShardNum))
}
}
@@ -946,7 +945,7 @@ func TestCreateCollectionInvalid(t *testing.T) {
{schema: entity.NewSchema().WithName("aaa").WithField(vecField).WithField(entity.NewField().WithIsPrimaryKey(true).WithDataType(entity.FieldTypeVarChar)), errMsg: "field name should not be empty"},
}
for _, mSchema := range mSchemaErrs {
- err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, mSchema.schema))
+ err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, mSchema.schema))
common.CheckErr(t, err, false, mSchema.errMsg)
}
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/zap"
- clientv2 "github.com/milvus-io/milvus/client/v2"
+ "github.com/milvus-io/milvus/client/v2"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/tests/go_client/base"
"github.com/milvus-io/milvus/tests/go_client/common"
@@ -23,15 +23,15 @@ func teardownTest(t *testing.T) func(t *testing.T) {
// drop all db
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
- dbs, _ := mc.ListDatabases(ctx, clientv2.NewListDatabaseOption())
+ dbs, _ := mc.ListDatabases(ctx, client.NewListDatabaseOption())
for _, db := range dbs {
if db != common.DefaultDb {
- _ = mc.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(db))
+ _ = mc.UsingDatabase(ctx, client.NewUsingDatabaseOption(db))
- collections, _ := mc.ListCollections(ctx, clientv2.NewListCollectionOption())
+ collections, _ := mc.ListCollections(ctx, client.NewListCollectionOption())
for _, coll := range collections {
- _ = mc.DropCollection(ctx, clientv2.NewDropCollectionOption(coll))
+ _ = mc.DropCollection(ctx, client.NewDropCollectionOption(coll))
}
- _ = mc.DropDatabase(ctx, clientv2.NewDropDatabaseOption(db))
+ _ = mc.DropDatabase(ctx, client.NewDropDatabaseOption(db))
}
}
}
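The teardown helper returns a func(t *testing.T) so each test can register cleanup with defer; it sweeps every non-default database, dropping collections first because Milvus refuses to drop a non-empty database (the "must drop all collections before drop database" error exercised in TestDatabase below). The typical call pattern, matching the defer teardownSuite(t) usage later in this file:

// Sketch of the registration pattern (test name is illustrative):
func TestDropDbExample(t *testing.T) {
	teardownSuite := teardownTest(t) // returns the cleanup func
	defer teardownSuite(t)           // drops non-default dbs and their collections
	// ... test body ...
}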
@@ -46,73 +46,73 @@ func TestDatabase(t *testing.T) {
// create db1
dbName1 := common.GenRandomString("db1", 4)
- err := clientDefault.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(dbName1))
+ err := clientDefault.CreateDatabase(ctx, client.NewCreateDatabaseOption(dbName1))
common.CheckErr(t, err, true)
// list db and verify db1 in dbs
- dbs, errList := clientDefault.ListDatabases(ctx, clientv2.NewListDatabaseOption())
+ dbs, errList := clientDefault.ListDatabases(ctx, client.NewListDatabaseOption())
common.CheckErr(t, errList, true)
require.Containsf(t, dbs, dbName1, fmt.Sprintf("%s db not in dbs: %v", dbName1, dbs))
// new client with db1 -> using db
- clientDB1 := createMilvusClient(ctx, t, &clientv2.ClientConfig{Address: *addr, DBName: dbName1})
+ clientDB1 := createMilvusClient(ctx, t, &client.ClientConfig{Address: *addr, DBName: dbName1})
t.Log("https://github.com/milvus-io/milvus/issues/34137")
- err = clientDB1.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(dbName1))
+ err = clientDB1.UsingDatabase(ctx, client.NewUsingDatabaseOption(dbName1))
common.CheckErr(t, err, true)
// create collections -> verify collections contains
_, db1Col1 := hp.CollPrepare.CreateCollection(ctx, t, clientDB1, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())
_, db1Col2 := hp.CollPrepare.CreateCollection(ctx, t, clientDB1, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())
- collections, errListCollections := clientDB1.ListCollections(ctx, clientv2.NewListCollectionOption())
+ collections, errListCollections := clientDB1.ListCollections(ctx, client.NewListCollectionOption())
common.CheckErr(t, errListCollections, true)
require.Containsf(t, collections, db1Col1.CollectionName, fmt.Sprintf("The collection %s not in: %v", db1Col1.CollectionName, collections))
require.Containsf(t, collections, db1Col2.CollectionName, fmt.Sprintf("The collection %s not in: %v", db1Col2.CollectionName, collections))
// create db2
dbName2 := common.GenRandomString("db2", 4)
- err = clientDefault.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(dbName2))
+ err = clientDefault.CreateDatabase(ctx, client.NewCreateDatabaseOption(dbName2))
common.CheckErr(t, err, true)
- dbs, err = clientDefault.ListDatabases(ctx, clientv2.NewListDatabaseOption())
+ dbs, err = clientDefault.ListDatabases(ctx, client.NewListDatabaseOption())
common.CheckErr(t, err, true)
require.Containsf(t, dbs, dbName2, fmt.Sprintf("%s db not in dbs: %v", dbName2, dbs))
// using db2 -> create collection -> drop collection
- err = clientDefault.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(dbName2))
+ err = clientDefault.UsingDatabase(ctx, client.NewUsingDatabaseOption(dbName2))
common.CheckErr(t, err, true)
_, db2Col1 := hp.CollPrepare.CreateCollection(ctx, t, clientDefault, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())
- err = clientDefault.DropCollection(ctx, clientv2.NewDropCollectionOption(db2Col1.CollectionName))
+ err = clientDefault.DropCollection(ctx, client.NewDropCollectionOption(db2Col1.CollectionName))
common.CheckErr(t, err, true)
// using empty db -> drop db2
- clientDefault.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(""))
+ clientDefault.UsingDatabase(ctx, client.NewUsingDatabaseOption(""))
- err = clientDefault.DropDatabase(ctx, clientv2.NewDropDatabaseOption(dbName2))
+ err = clientDefault.DropDatabase(ctx, client.NewDropDatabaseOption(dbName2))
common.CheckErr(t, err, true)
// list db and verify db drop success
- dbs, err = clientDefault.ListDatabases(ctx, clientv2.NewListDatabaseOption())
+ dbs, err = clientDefault.ListDatabases(ctx, client.NewListDatabaseOption())
common.CheckErr(t, err, true)
require.NotContains(t, dbs, dbName2)
// drop db1 which has some collections
- err = clientDB1.DropDatabase(ctx, clientv2.NewDropDatabaseOption(dbName1))
+ err = clientDB1.DropDatabase(ctx, client.NewDropDatabaseOption(dbName1))
common.CheckErr(t, err, false, "must drop all collections before drop database")
// drop all db1's collections -> drop db1
- clientDB1.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(dbName1))
+ clientDB1.UsingDatabase(ctx, client.NewUsingDatabaseOption(dbName1))
- err = clientDB1.DropCollection(ctx, clientv2.NewDropCollectionOption(db1Col1.CollectionName))
+ err = clientDB1.DropCollection(ctx, client.NewDropCollectionOption(db1Col1.CollectionName))
common.CheckErr(t, err, true)
- err = clientDB1.DropCollection(ctx, clientv2.NewDropCollectionOption(db1Col2.CollectionName))
+ err = clientDB1.DropCollection(ctx, client.NewDropCollectionOption(db1Col2.CollectionName))
common.CheckErr(t, err, true)
- err = clientDB1.DropDatabase(ctx, clientv2.NewDropDatabaseOption(dbName1))
+ err = clientDB1.DropDatabase(ctx, client.NewDropDatabaseOption(dbName1))
common.CheckErr(t, err, true)
// drop default db
- err = clientDefault.DropDatabase(ctx, clientv2.NewDropDatabaseOption(common.DefaultDb))
+ err = clientDefault.DropDatabase(ctx, client.NewDropDatabaseOption(common.DefaultDb))
common.CheckErr(t, err, false, "can not drop default database")
- dbs, err = clientDefault.ListDatabases(ctx, clientv2.NewListDatabaseOption())
+ dbs, err = clientDefault.ListDatabases(ctx, client.NewListDatabaseOption())
common.CheckErr(t, err, true)
require.Containsf(t, dbs, common.DefaultDb, fmt.Sprintf("The db %s not in: %v", common.DefaultDb, dbs))
}
@@ -126,18 +126,18 @@ func TestCreateDb(t *testing.T) {
     ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
     mc := createDefaultMilvusClient(ctx, t)
     dbName := common.GenRandomString("db", 4)
-    err := mc.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(dbName))
+    err := mc.CreateDatabase(ctx, client.NewCreateDatabaseOption(dbName))
     common.CheckErr(t, err, true)
     // create existed db
-    err = mc.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(dbName))
+    err = mc.CreateDatabase(ctx, client.NewCreateDatabaseOption(dbName))
     common.CheckErr(t, err, false, fmt.Sprintf("database already exist: %s", dbName))
     // create default db
-    err = mc.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(common.DefaultDb))
+    err = mc.CreateDatabase(ctx, client.NewCreateDatabaseOption(common.DefaultDb))
     common.CheckErr(t, err, false, fmt.Sprintf("database already exist: %s", common.DefaultDb))
-    emptyErr := mc.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(""))
+    emptyErr := mc.CreateDatabase(ctx, client.NewCreateDatabaseOption(""))
     common.CheckErr(t, emptyErr, false, "database name couldn't be empty")
 }
@@ -147,7 +147,7 @@ func TestDropDb(t *testing.T) {
     defer teardownSuite(t)
     // create collection in default db
-    listCollOpt := clientv2.NewListCollectionOption()
+    listCollOpt := client.NewListCollectionOption()
     ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
     mc := createDefaultMilvusClient(ctx, t)
     _, defCol := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())
@@ -156,13 +156,13 @@ func TestDropDb(t *testing.T) {
     // create db
     dbName := common.GenRandomString("db", 4)
-    err := mc.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(dbName))
+    err := mc.CreateDatabase(ctx, client.NewCreateDatabaseOption(dbName))
     common.CheckErr(t, err, true)
     // using db and drop the db
-    err = mc.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(dbName))
+    err = mc.UsingDatabase(ctx, client.NewUsingDatabaseOption(dbName))
     common.CheckErr(t, err, true)
-    err = mc.DropDatabase(ctx, clientv2.NewDropDatabaseOption(dbName))
+    err = mc.DropDatabase(ctx, client.NewDropDatabaseOption(dbName))
     common.CheckErr(t, err, true)
     // verify current db
@@ -170,21 +170,21 @@ func TestDropDb(t *testing.T) {
     common.CheckErr(t, err, false, fmt.Sprintf("database not found[database=%s]", dbName))
     // using default db and verify collections
-    err = mc.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(common.DefaultDb))
+    err = mc.UsingDatabase(ctx, client.NewUsingDatabaseOption(common.DefaultDb))
     common.CheckErr(t, err, true)
     collections, _ = mc.ListCollections(ctx, listCollOpt)
     require.Contains(t, collections, defCol.CollectionName)
     // drop not existed db
-    err = mc.DropDatabase(ctx, clientv2.NewDropDatabaseOption(common.GenRandomString("db", 4)))
+    err = mc.DropDatabase(ctx, client.NewDropDatabaseOption(common.GenRandomString("db", 4)))
     common.CheckErr(t, err, true)
     // drop empty db
-    err = mc.DropDatabase(ctx, clientv2.NewDropDatabaseOption(""))
+    err = mc.DropDatabase(ctx, client.NewDropDatabaseOption(""))
     common.CheckErr(t, err, false, "database name couldn't be empty")
     // drop default db
-    err = mc.DropDatabase(ctx, clientv2.NewDropDatabaseOption(common.DefaultDb))
+    err = mc.DropDatabase(ctx, client.NewDropDatabaseOption(common.DefaultDb))
     common.CheckErr(t, err, false, "can not drop default database")
 }
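The hunks above exercise the full database lifecycle. As a reference point, here is a minimal standalone sketch of the same option-builder calls, assuming a locally reachable Milvus and the client.New constructor from client/v2 (the tests themselves go through the base.NewMilvusClient wrapper instead):

    package main

    import (
        "context"
        "log"

        client "github.com/milvus-io/milvus/client/v2"
    )

    func main() {
        ctx := context.Background()
        // client.New is assumed here; "localhost:19530" is a placeholder address.
        mc, err := client.New(ctx, &client.ClientConfig{Address: "localhost:19530"})
        if err != nil {
            log.Fatal(err)
        }
        // create a database, switch the connection to it, then drop it again
        if err = mc.CreateDatabase(ctx, client.NewCreateDatabaseOption("sketch_db")); err != nil {
            log.Fatal(err)
        }
        if err = mc.UsingDatabase(ctx, client.NewUsingDatabaseOption("sketch_db")); err != nil {
            log.Fatal(err)
        }
        // dropping requires the database to be empty; "default" can never be dropped
        if err = mc.DropDatabase(ctx, client.NewDropDatabaseOption("sketch_db")); err != nil {
            log.Fatal(err)
        }
    }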
@@ -194,7 +194,7 @@ func TestUsingDb(t *testing.T) {
     defer teardownSuite(t)
     // create collection in default db
-    listCollOpt := clientv2.NewListCollectionOption()
+    listCollOpt := client.NewListCollectionOption()
     ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
     mc := createDefaultMilvusClient(ctx, t)
@@ -205,17 +205,17 @@ func TestUsingDb(t *testing.T) {
     // using not existed db
     dbName := common.GenRandomString("db", 4)
-    err := mc.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(dbName))
+    err := mc.UsingDatabase(ctx, client.NewUsingDatabaseOption(dbName))
     common.CheckErr(t, err, false, fmt.Sprintf("database not found[database=%s]", dbName))
     // using empty db
-    err = mc.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(""))
+    err = mc.UsingDatabase(ctx, client.NewUsingDatabaseOption(""))
     common.CheckErr(t, err, true)
     collections, _ = mc.ListCollections(ctx, listCollOpt)
     require.Contains(t, collections, col.CollectionName)
     // using current db
-    err = mc.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(common.DefaultDb))
+    err = mc.UsingDatabase(ctx, client.NewUsingDatabaseOption(common.DefaultDb))
     common.CheckErr(t, err, true)
     collections, _ = mc.ListCollections(ctx, listCollOpt)
     require.Contains(t, collections, col.CollectionName)
@@ -226,15 +226,15 @@ func TestClientWithDb(t *testing.T) {
     teardownSuite := teardownTest(t)
     defer teardownSuite(t)
-    listCollOpt := clientv2.NewListCollectionOption()
+    listCollOpt := client.NewListCollectionOption()
     ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
     // connect with not existed db
-    _, err := base.NewMilvusClient(ctx, &clientv2.ClientConfig{Address: *addr, DBName: "dbName"})
+    _, err := base.NewMilvusClient(ctx, &client.ClientConfig{Address: *addr, DBName: "dbName"})
     common.CheckErr(t, err, false, "database not found")
     // connect default db -> create a collection in default db
-    mcDefault, errDefault := base.NewMilvusClient(ctx, &clientv2.ClientConfig{
+    mcDefault, errDefault := base.NewMilvusClient(ctx, &client.ClientConfig{
         Address: *addr,
         // DBName: common.DefaultDb,
     })
@@ -246,11 +246,11 @@ func TestClientWithDb(t *testing.T) {
     // create a db and create collection in db
     dbName := common.GenRandomString("db", 5)
-    err = mcDefault.CreateDatabase(ctx, clientv2.NewCreateDatabaseOption(dbName))
+    err = mcDefault.CreateDatabase(ctx, client.NewCreateDatabaseOption(dbName))
     common.CheckErr(t, err, true)
     // and connect with db
-    mcDb, err := base.NewMilvusClient(ctx, &clientv2.ClientConfig{
+    mcDb, err := base.NewMilvusClient(ctx, &client.ClientConfig{
         Address: *addr,
         DBName:  dbName,
     })
@@ -262,12 +262,12 @@ func TestClientWithDb(t *testing.T) {
     require.Containsf(t, dbCollections, dbCol1.CollectionName, fmt.Sprintf("The collection %s not in: %v", dbCol1.CollectionName, dbCollections))
     // using default db and collection not in
-    _ = mcDb.UsingDatabase(ctx, clientv2.NewUsingDatabaseOption(common.DefaultDb))
+    _ = mcDb.UsingDatabase(ctx, client.NewUsingDatabaseOption(common.DefaultDb))
     defCollections, _ = mcDb.ListCollections(ctx, listCollOpt)
     require.NotContains(t, defCollections, dbCol1.CollectionName)
     // connect empty db (actually default db)
-    mcEmpty, err := base.NewMilvusClient(ctx, &clientv2.ClientConfig{
+    mcEmpty, err := base.NewMilvusClient(ctx, &client.ClientConfig{
         Address: *addr,
         DBName:  "",
     })
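For reference, DBName in client.ClientConfig scopes the whole connection: an empty value falls back to the "default" database (the mcEmpty case above), while a nonexistent name fails at connect time. A hedged fragment reusing the imports and assumptions of the previous sketch:

    // Connect straight into a named database instead of calling UsingDatabase afterwards.
    // The database must already exist, otherwise the connect fails with "database not found".
    mcDb, err := client.New(ctx, &client.ClientConfig{
        Address: "localhost:19530", // placeholder address
        DBName:  "my_db",           // "" would mean the "default" database
    })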

View File

@@ -8,7 +8,7 @@ import (
     "github.com/stretchr/testify/require"
     "go.uber.org/zap"

-    clientv2 "github.com/milvus-io/milvus/client/v2"
+    "github.com/milvus-io/milvus/client/v2"
     "github.com/milvus-io/milvus/client/v2/entity"
     "github.com/milvus-io/milvus/pkg/log"
     "github.com/milvus-io/milvus/tests/go_client/common"
@@ -34,18 +34,18 @@ func TestDelete(t *testing.T) {
     // delete with expr
     expr := fmt.Sprintf("%s < 10", common.DefaultInt64FieldName)
     ids := []int64{10, 11, 12, 13, 14}
-    delRes, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(expr))
+    delRes, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(10), delRes.DeleteCount)
     // delete with int64 pk
-    delRes, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, ids))
+    delRes, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, ids))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(5), delRes.DeleteCount)
     // query, verify delete success
     exprQuery := fmt.Sprintf("%s < 15", common.DefaultInt64FieldName)
-    queryRes, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Zero(t, queryRes.ResultCount)
 }
@@ -70,17 +70,17 @@ func TestDeleteVarcharPks(t *testing.T) {
     // delete varchar with pk
     ids := []string{"0", "1", "2", "3", "4"}
     expr := "varchar like '1%' "
-    delRes, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, ids))
+    delRes, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, ids))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(5), delRes.DeleteCount)
-    delRes, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(expr))
+    delRes, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(1110), delRes.DeleteCount)
     // query, verify delete success
     exprQuery := "varchar like '1%' and varchar not in ['0', '1', '2', '3', '4'] "
-    queryRes, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Zero(t, queryRes.ResultCount)
 }
@@ -96,7 +96,7 @@ func TestDeleteEmptyCollection(t *testing.T) {
     // delete expr-in from empty collection
     delExpr := fmt.Sprintf("%s in [0]", common.DefaultInt64FieldName)
-    delRes, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(delExpr))
+    delRes, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(delExpr))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(1), delRes.DeleteCount)
@@ -105,7 +105,7 @@ func TestDeleteEmptyCollection(t *testing.T) {
     prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
     comExpr := fmt.Sprintf("%s < 10", common.DefaultInt64FieldName)
-    delRes, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(comExpr))
+    delRes, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(comExpr))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(0), delRes.DeleteCount)
 }
@@ -116,14 +116,14 @@ func TestDeleteNotExistName(t *testing.T) {
     mc := createDefaultMilvusClient(ctx, t)
     // delete from not existed collection
-    _, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption("aaa").WithExpr(""))
+    _, errDelete := mc.Delete(ctx, client.NewDeleteOption("aaa").WithExpr(""))
     common.CheckErr(t, errDelete, false, "collection not found")
     // delete from not existed partition
     cp := hp.NewCreateCollectionParams(hp.Int64Vec)
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
-    _, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithPartition("aaa"))
+    _, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithPartition("aaa"))
     common.CheckErr(t, errDelete, false, "partition not found[partition=aaa]")
 }
@@ -142,22 +142,22 @@ func TestDeleteComplexExprWithoutLoad(t *testing.T) {
     prepare.FlushData(ctx, t, mc, schema.CollectionName)
     idsPk := []int64{0, 1, 2, 3, 4}
-    _, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, idsPk))
+    _, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, idsPk))
     common.CheckErr(t, errDelete, true)
-    _, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, []string{"0", "1"}))
+    _, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, []string{"0", "1"}))
     common.CheckErr(t, errDelete, false, "collection not loaded")
     // delete varchar with pk
     expr := fmt.Sprintf("%s < 100", common.DefaultInt64FieldName)
-    _, errDelete2 := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(expr))
+    _, errDelete2 := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr))
     common.CheckErr(t, errDelete2, false, "collection not loaded")
     // index and load collection
     prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
     prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
-    res, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s >= 0 ", common.DefaultInt64FieldName)).
+    res, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s >= 0 ", common.DefaultInt64FieldName)).
         WithOutputFields([]string{common.QueryCountFieldName}).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, err, true)
     count, _ := res.Fields[0].GetAsInt64(0)
@@ -174,20 +174,20 @@ func TestDeleteEmptyIds(t *testing.T) {
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
     // delete
-    _, err := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, nil))
+    _, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, nil))
     common.CheckErr(t, err, false, "failed to create delete plan: cannot parse expression: int64 in []")
-    _, err = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, []int64{}))
+    _, err = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, []int64{}))
     common.CheckErr(t, err, false, "failed to create delete plan: cannot parse expression: int64 in []")
-    _, err = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultInt64FieldName, []string{""}))
+    _, err = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultInt64FieldName, []string{""}))
     common.CheckErr(t, err, false, "failed to create delete plan: cannot parse expression: int64 in [\"\"]")
     t.Log("https://github.com/milvus-io/milvus/issues/33761")
-    _, err = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(""))
+    _, err = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(""))
     common.CheckErr(t, err, false, "delete plan can't be empty or always true")
-    _, err = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName))
+    _, err = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName))
     common.CheckErr(t, err, false, "delete plan can't be empty or always true")
 }
@@ -211,18 +211,18 @@ func TestDeleteVarcharEmptyIds(t *testing.T) {
     exprQuery := "varchar != '' "
     // delete varchar with empty ids
-    delRes, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, []string{}))
+    delRes, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, []string{}))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(0), delRes.DeleteCount)
-    queryRes, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Equal(t, common.DefaultNb, queryRes.ResultCount)
     // delete with default string ids
-    delRes, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, []string{""}))
+    delRes, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultVarcharFieldName, []string{""}))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(1), delRes.DeleteCount)
-    queryRes, errQuery = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Equal(t, common.DefaultNb, queryRes.ResultCount)
 }
@@ -236,13 +236,13 @@ func TestDeleteInvalidIds(t *testing.T) {
     cp := hp.NewCreateCollectionParams(hp.VarcharBinary)
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
-    _, err := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultVarcharFieldName, []int64{0}))
+    _, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultVarcharFieldName, []int64{0}))
     common.CheckErr(t, err, false, "failed to create delete plan: cannot parse expression: varchar in [0]")
-    _, err = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, []int64{0}))
+    _, err = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, []int64{0}))
     common.CheckErr(t, err, false, "failed to create delete plan: cannot parse expression: int64 in [0]")
-    _, err = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultInt64FieldName, []string{"0"}))
+    _, err = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithStringIDs(common.DefaultInt64FieldName, []string{"0"}))
     common.CheckErr(t, err, false, "failed to create delete plan: cannot parse expression: int64 in [\"0\"]")
 }
@@ -259,11 +259,11 @@ func TestDeleteWithIds(t *testing.T) {
     varcharField := entity.NewField().WithName(common.DefaultVarcharFieldName).WithDataType(entity.FieldTypeVarChar).WithMaxLength(common.MaxLength)
     collName := common.GenRandomString(prefix, 6)
     schema := entity.NewSchema().WithName(collName).WithField(pkField).WithField(vecField).WithField(int64Field).WithField(varcharField)
-    err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+    err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
     common.CheckErr(t, err, true)
     // insert
-    insertOpt := clientv2.NewColumnBasedInsertOption(collName)
+    insertOpt := client.NewColumnBasedInsertOption(collName)
     for _, field := range schema.Fields {
         if field.Name == pkName {
             insertOpt.WithColumns(hp.GenColumnData(common.DefaultNb, field.DataType, *hp.TNewDataOption().TWithFieldName(pkName)))
@@ -278,16 +278,16 @@ func TestDeleteWithIds(t *testing.T) {
     hp.CollPrepare.Load(ctx, t, mc, hp.NewLoadParams(collName))
     // delete with non-pk fields ids
-    resDe1, err := mc.Delete(ctx, clientv2.NewDeleteOption(collName).WithInt64IDs(common.DefaultInt64FieldName, []int64{0, 1}))
+    resDe1, err := mc.Delete(ctx, client.NewDeleteOption(collName).WithInt64IDs(common.DefaultInt64FieldName, []int64{0, 1}))
     common.CheckErr(t, err, true)
     require.Equal(t, int64(2), resDe1.DeleteCount)
-    resDe2, err2 := mc.Delete(ctx, clientv2.NewDeleteOption(collName).WithStringIDs(common.DefaultVarcharFieldName, []string{"2", "3", "4"}))
+    resDe2, err2 := mc.Delete(ctx, client.NewDeleteOption(collName).WithStringIDs(common.DefaultVarcharFieldName, []string{"2", "3", "4"}))
     common.CheckErr(t, err2, true)
     require.Equal(t, int64(3), resDe2.DeleteCount)
     // query and verify
-    resQuery, err := mc.Query(ctx, clientv2.NewQueryOption(collName).WithFilter("pk < 5").WithConsistencyLevel(entity.ClStrong))
+    resQuery, err := mc.Query(ctx, client.NewQueryOption(collName).WithFilter("pk < 5").WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, err, true)
     require.Zero(t, resQuery.ResultCount)
 }
@@ -301,7 +301,7 @@ func TestDeleteDefaultPartitionName(t *testing.T) {
     cp := hp.NewCreateCollectionParams(hp.Int64Vec)
     prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
     parName := "p1"
-    err := mc.CreatePartition(ctx, clientv2.NewCreatePartitionOption(schema.CollectionName, parName))
+    err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
     common.CheckErr(t, err, true)
     // insert [0, 3000) into default, insert [3000, 6000) into p1
@@ -315,16 +315,16 @@ func TestDeleteDefaultPartitionName(t *testing.T) {
     // delete with default params, actually delete from all partitions
     expr := fmt.Sprintf("%s >= 0", common.DefaultInt64FieldName)
-    resDel, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(expr))
+    resDel, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(common.DefaultNb*2), resDel.DeleteCount)
     // query, verify delete all partitions
-    queryRes, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Zero(t, queryRes.ResultCount)
-    queryRes, errQuery = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithPartitions([]string{common.DefaultPartition, parName}).
+    queryRes, errQuery = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithPartitions([]string{common.DefaultPartition, parName}).
         WithConsistencyLevel(entity.ClStrong).WithFilter(expr))
     common.CheckErr(t, errQuery, true)
     require.Zero(t, queryRes.ResultCount)
@@ -339,7 +339,7 @@ func TestDeleteEmptyPartitionName(t *testing.T) {
     cp := hp.NewCreateCollectionParams(hp.Int64Vec)
     prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
     parName := "p1"
-    err := mc.CreatePartition(ctx, clientv2.NewCreatePartitionOption(schema.CollectionName, parName))
+    err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
     common.CheckErr(t, err, true)
     // insert [0, 3000) into default, insert [3000, 6000) into p1
@@ -353,16 +353,16 @@ func TestDeleteEmptyPartitionName(t *testing.T) {
     // delete with default params, actually delete from all partitions
     expr := fmt.Sprintf("%s >= 0", common.DefaultInt64FieldName)
-    resDel, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(expr).WithPartition(""))
+    resDel, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr).WithPartition(""))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(common.DefaultNb*2), resDel.DeleteCount)
     // query, verify delete all partitions
-    queryRes, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Zero(t, queryRes.ResultCount)
-    queryRes, errQuery = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithPartitions([]string{common.DefaultPartition, parName}).
+    queryRes, errQuery = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithPartitions([]string{common.DefaultPartition, parName}).
         WithConsistencyLevel(entity.ClStrong).WithFilter(expr))
     common.CheckErr(t, errQuery, true)
     require.Zero(t, queryRes.ResultCount)
@@ -377,7 +377,7 @@ func TestDeletePartitionName(t *testing.T) {
     cp := hp.NewCreateCollectionParams(hp.Int64Vec)
     prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
     parName := "p1"
-    err := mc.CreatePartition(ctx, clientv2.NewCreatePartitionOption(schema.CollectionName, parName))
+    err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
     common.CheckErr(t, err, true)
     // insert [0, 3000) into default, insert [3000, 6000) into parName
@@ -396,37 +396,37 @@ func TestDeletePartitionName(t *testing.T) {
     // delete ids that not existed in partition
     // delete [0, 200) from p1
-    del1, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(exprDefault).WithPartition(parName))
+    del1, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(exprDefault).WithPartition(parName))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(0), del1.DeleteCount)
     // delete [4800, 6000) from _default
-    del2, errDelete := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(exprP1).WithPartition(common.DefaultPartition))
+    del2, errDelete := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(exprP1).WithPartition(common.DefaultPartition))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(0), del2.DeleteCount)
     // query and verify
-    resQuery, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithOutputFields([]string{common.QueryCountFieldName}).
+    resQuery, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithOutputFields([]string{common.QueryCountFieldName}).
         WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, err, true)
     count, _ := resQuery.Fields[0].GetAsInt64(0)
     require.Equal(t, int64(common.DefaultNb*2), count)
     // delete from partition
-    del1, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(exprDefault).WithPartition(common.DefaultPartition))
+    del1, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(exprDefault).WithPartition(common.DefaultPartition))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(200), del1.DeleteCount)
-    del2, errDelete = mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(exprP1).WithPartition(parName))
+    del2, errDelete = mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(exprP1).WithPartition(parName))
     common.CheckErr(t, errDelete, true)
     require.Equal(t, int64(1500), del2.DeleteCount)
     // query, verify delete all partitions
-    queryRes, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
+    queryRes, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Equal(t, common.DefaultNb*2-200-1500, queryRes.ResultCount)
-    queryRes, errQuery = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong).
+    queryRes, errQuery = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprQuery).WithConsistencyLevel(entity.ClStrong).
         WithPartitions([]string{common.DefaultPartition, parName}))
     common.CheckErr(t, errQuery, true)
     require.Equal(t, common.DefaultNb*2-200-1500, queryRes.ResultCount)
@@ -494,12 +494,12 @@ func TestDeleteComplexExpr(t *testing.T) {
         log.Debug("TestDeleteComplexExpr", zap.Any("expr", exprLimit.expr))
-        resDe, err := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(exprLimit.expr))
+        resDe, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(exprLimit.expr))
         common.CheckErr(t, err, true)
         log.Debug("delete count", zap.Bool("equal", int64(exprLimit.count) == resDe.DeleteCount))
         // require.Equal(t, int64(exprLimit.count), resDe.DeleteCount)
-        resQuery, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(exprLimit.expr).WithConsistencyLevel(entity.ClStrong))
+        resQuery, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprLimit.expr).WithConsistencyLevel(entity.ClStrong))
         common.CheckErr(t, err, true)
         require.Zero(t, resQuery.ResultCount)
     }
@@ -522,7 +522,7 @@ func TestDeleteInvalidExpr(t *testing.T) {
     prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
     for _, _invalidExpr := range common.InvalidExpressions {
-        _, err := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithExpr(_invalidExpr.Expr))
+        _, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(_invalidExpr.Expr))
         common.CheckErr(t, err, _invalidExpr.ErrNil, _invalidExpr.ErrMsg)
     }
 }
@@ -546,13 +546,13 @@ func TestDeleteDuplicatedPks(t *testing.T) {
     // delete
     deleteIds := []int64{0, 0, 0, 0, 0}
-    delRes, err := mc.Delete(ctx, clientv2.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, deleteIds))
+    delRes, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithInt64IDs(common.DefaultInt64FieldName, deleteIds))
     common.CheckErr(t, err, true)
     require.Equal(t, 5, int(delRes.DeleteCount))
     // query, verify delete success
     expr := fmt.Sprintf("%s >= 0 ", common.DefaultInt64FieldName)
-    resQuery, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
+    resQuery, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
     common.CheckErr(t, errQuery, true)
     require.Equal(t, common.DefaultNb-1, resQuery.ResultCount)
 }
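Taken together, the delete cases above all drive one option builder. A compact sketch of the combinations they cover, assuming an already-connected client as in the earlier snippet plus a fmt import (collection "coll", partition "p1", and the field names are placeholders):

    func deleteSketch(ctx context.Context, mc *client.Client) error {
        // by filter expression
        if _, err := mc.Delete(ctx, client.NewDeleteOption("coll").WithExpr("int64 < 10")); err != nil {
            return err
        }
        // by primary keys, int64 or varchar flavour
        if _, err := mc.Delete(ctx, client.NewDeleteOption("coll").WithInt64IDs("int64", []int64{0, 1, 2})); err != nil {
            return err
        }
        if _, err := mc.Delete(ctx, client.NewDeleteOption("coll").WithStringIDs("varchar", []string{"a", "b"})); err != nil {
            return err
        }
        // scoped to a single partition; rows outside "p1" are left untouched
        res, err := mc.Delete(ctx, client.NewDeleteOption("coll").WithExpr("int64 >= 0").WithPartition("p1"))
        if err != nil {
            return err
        }
        fmt.Println("deleted:", res.DeleteCount) // rows matched within "p1" only
        return nil
    }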

View File

@@ -17,14 +17,12 @@ import (
 type InsertParams struct {
     Schema        *entity.Schema
     PartitionName string
-    Nb            int
     IsRows        bool
 }

 func NewInsertParams(schema *entity.Schema, nb int) *InsertParams {
     return &InsertParams{
         Schema: schema,
-        Nb:     nb,
     }
 }
@@ -40,13 +38,19 @@ func (opt *InsertParams) TWithIsRows(isRows bool) *InsertParams {
 // GenColumnDataOption -- create column data --
 type GenDataOption struct {
+    nb           int
+    start        int
     dim          int
     maxLen       int
     sparseMaxLen int
     maxCapacity  int
-    start        int
-    fieldName    string
     elementType  entity.FieldType
+    fieldName    string
+}
+
+func (opt *GenDataOption) TWithNb(nb int) *GenDataOption {
+    opt.nb = nb
+    return opt
 }

 func (opt *GenDataOption) TWithDim(dim int) *GenDataOption {
@@ -86,11 +90,12 @@ func (opt *GenDataOption) TWithElementType(eleType entity.FieldType) *GenDataOpt
 func TNewDataOption() *GenDataOption {
     return &GenDataOption{
+        nb:           common.DefaultNb,
+        start:        0,
         dim:          common.DefaultDim,
         maxLen:       common.TestMaxLen,
         sparseMaxLen: common.TestMaxLen,
         maxCapacity:  common.TestCapacity,
-        start:        0,
         elementType:  entity.FieldTypeNone,
     }
 }
@@ -413,3 +418,25 @@ func MergeColumnsToDynamic(nb int, columns []column.Column, columnName string) *
     return jsonColumn
 }
+
+func GenColumnsBasedSchema(schema *entity.Schema, option *GenDataOption) ([]column.Column, []column.Column) {
+    if nil == schema || schema.CollectionName == "" {
+        log.Fatal("[GenColumnsBasedSchema] Nil Schema is not expected")
+    }
+    fields := schema.Fields
+    columns := make([]column.Column, 0, len(fields)+1)
+    var dynamicColumns []column.Column
+    for _, field := range fields {
+        if field.DataType == entity.FieldTypeArray {
+            option.TWithElementType(field.ElementType)
+        }
+        if field.AutoID {
+            continue
+        }
+        columns = append(columns, GenColumnData(option.nb, field.DataType, *option))
+    }
+    if schema.EnableDynamicField {
+        dynamicColumns = GenDynamicColumnData(option.start, option.nb)
+    }
+    return columns, dynamicColumns
+}
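A usage sketch for the new helper, written as it would look inside this helper package (the insertBySchema wrapper and the mc parameter type are hypothetical; everything else comes from this diff):

    // insertBySchema: generate one column per non-AutoID field, plus dynamic-field
    // columns when the schema enables them, then insert everything in a single call.
    func insertBySchema(ctx context.Context, mc *base.MilvusClient, schema *entity.Schema) error {
        option := TNewDataOption().TWithNb(500) // nb defaults to common.DefaultNb when unset
        columns, dynamicColumns := GenColumnsBasedSchema(schema, option)
        insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName).
            WithColumns(columns...).
            WithColumns(dynamicColumns...)
        _, err := mc.Insert(ctx, insertOpt)
        return err
    }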

View File

@@ -134,18 +134,8 @@ func (chainTask *CollectionPrepare) InsertData(ctx context.Context, t *testing.T
     if nil == ip.Schema || ip.Schema.CollectionName == "" {
         log.Fatal("[InsertData] Nil Schema is not expected")
     }
-    fields := ip.Schema.Fields
-    insertOpt := clientv2.NewColumnBasedInsertOption(ip.Schema.CollectionName)
-    if ip.Schema.EnableDynamicField {
-        insertOpt.WithColumns(GenDynamicColumnData(option.start, ip.Nb)...)
-    }
-    for _, field := range fields {
-        if field.DataType == entity.FieldTypeArray {
-            option.TWithElementType(field.ElementType)
-        }
-        column := GenColumnData(ip.Nb, field.DataType, *option)
-        insertOpt.WithColumns(column)
-    }
+    columns, dynamicColumns := GenColumnsBasedSchema(ip.Schema, option)
+    insertOpt := clientv2.NewColumnBasedInsertOption(ip.Schema.CollectionName).WithColumns(columns...).WithColumns(dynamicColumns...)
     if ip.PartitionName != "" {
         insertOpt.WithPartition(ip.PartitionName)
     }

View File

@@ -8,7 +8,7 @@ import (
     "github.com/stretchr/testify/require"
     "go.uber.org/zap"

-    clientv2 "github.com/milvus-io/milvus/client/v2"
+    "github.com/milvus-io/milvus/client/v2"
     "github.com/milvus-io/milvus/client/v2/column"
     "github.com/milvus-io/milvus/client/v2/entity"
     "github.com/milvus-io/milvus/client/v2/index"
@@ -29,7 +29,7 @@ func TestInsertDefault(t *testing.T) {
     columnOpt := hp.TNewDataOption().TWithDim(common.DefaultDim)
     pkColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeInt64, *columnOpt)
     vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, *columnOpt)
-    insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)
+    insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)
     if !autoID {
         insertOpt.WithColumns(pkColumn)
     }
@@ -51,14 +51,14 @@ func TestInsertDefaultPartition(t *testing.T) {
     // create partition
     parName := common.GenRandomString("par", 4)
-    err := mc.CreatePartition(ctx, clientv2.NewCreatePartitionOption(schema.CollectionName, parName))
+    err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
     common.CheckErr(t, err, true)
     // insert
     columnOpt := hp.TNewDataOption().TWithDim(common.DefaultDim)
     pkColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeInt64, *columnOpt)
     vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, *columnOpt)
-    insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)
+    insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)
     if !autoID {
         insertOpt.WithColumns(pkColumn)
     }
@@ -82,7 +82,7 @@ func TestInsertVarcharPkDefault(t *testing.T) {
     columnOpt := hp.TNewDataOption().TWithDim(common.DefaultDim)
     pkColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeVarChar, *columnOpt)
     vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeBinaryVector, *columnOpt)
-    insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)
+    insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)
     if !autoID {
         insertOpt.WithColumns(pkColumn)
     }
@@ -105,7 +105,7 @@ func TestInsertAllFieldsData(t *testing.T) {
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(dynamic))
     // insert
-    insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName)
+    insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName)
     columnOpt := hp.TNewDataOption().TWithDim(common.DefaultDim)
     for _, field := range schema.Fields {
         if field.DataType == entity.FieldTypeArray {
@@ -123,7 +123,7 @@
     common.CheckInsertResult(t, pkColumn, insertRes)
     // flush and check row count
-    flushTak, _ := mc.Flush(ctx, clientv2.NewFlushOption(schema.CollectionName))
+    flushTak, _ := mc.Flush(ctx, client.NewFlushOption(schema.CollectionName))
     err := flushTak.Await(ctx)
     common.CheckErr(t, err, true)
 }
@@ -139,7 +139,7 @@ func TestInsertDynamicExtraColumn(t *testing.T) {
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(true))
     // insert without dynamic field
-    insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName)
+    insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName)
     columnOpt := hp.TNewDataOption().TWithDim(common.DefaultDim)
     for _, field := range schema.Fields {
@@ -151,7 +151,7 @@
     require.Equal(t, common.DefaultNb, int(insertRes.InsertCount))
     // insert with dynamic field
-    insertOptDynamic := clientv2.NewColumnBasedInsertOption(schema.CollectionName)
+    insertOptDynamic := client.NewColumnBasedInsertOption(schema.CollectionName)
     columnOpt.TWithStart(common.DefaultNb)
     for _, fieldType := range hp.GetAllScalarFieldType() {
         if fieldType == entity.FieldTypeArray {
@@ -166,17 +166,17 @@
     require.Equal(t, common.DefaultNb, int(insertRes2.InsertCount))
     // index
-    it, _ := mc.CreateIndex(ctx, clientv2.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, index.NewSCANNIndex(entity.COSINE, 32, false)))
+    it, _ := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, index.NewSCANNIndex(entity.COSINE, 32, false)))
     err := it.Await(ctx)
     common.CheckErr(t, err, true)
     // load
-    lt, _ := mc.LoadCollection(ctx, clientv2.NewLoadCollectionOption(schema.CollectionName))
+    lt, _ := mc.LoadCollection(ctx, client.NewLoadCollectionOption(schema.CollectionName))
     err = lt.Await(ctx)
     common.CheckErr(t, err, true)
     // query
-    res, _ := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter("int64 == 3000").WithOutputFields([]string{"*"}))
+    res, _ := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter("int64 == 3000").WithOutputFields([]string{"*"}))
     common.CheckOutputFields(t, []string{common.DefaultFloatVecFieldName, common.DefaultInt64FieldName, common.DefaultDynamicFieldName}, res.Fields)
     for _, c := range res.Fields {
         log.Debug("data", zap.Any("data", c.FieldData()))
@@ -192,7 +192,7 @@ func TestInsertEmptyArray(t *testing.T) {
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
     columnOpt := hp.TNewDataOption().TWithDim(common.DefaultDim).TWithMaxCapacity(0)
-    insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName)
+    insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName)
     for _, field := range schema.Fields {
         if field.DataType == entity.FieldTypeArray {
             columnOpt.TWithElementType(field.ElementType)
@@ -222,7 +222,7 @@ func TestInsertArrayDataTypeNotMatch(t *testing.T) {
     // create collection
     schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField).WithField(arrayField)
-    err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+    err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
     common.CheckErr(t, err, true)
     // prepare data
@@ -231,7 +231,7 @@ func TestInsertArrayDataTypeNotMatch(t *testing.T) {
             columnType = entity.FieldTypeBool
         }
         arrayColumn := hp.GenColumnData(100, entity.FieldTypeArray, *hp.TNewDataOption().TWithElementType(columnType).TWithFieldName("array"))
-        _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, int64Column, vecColumn, arrayColumn))
+        _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, int64Column, vecColumn, arrayColumn))
         common.CheckErr(t, err, false, "insert data does not match")
     }
 }
@@ -253,12 +253,12 @@ func TestInsertArrayDataCapacityExceed(t *testing.T) {
         // create collection
         schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField).WithField(arrayField)
-        err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+        err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
         common.CheckErr(t, err, true)
         // insert array data capacity > field.MaxCapacity
         arrayColumn := hp.GenColumnData(100, entity.FieldTypeArray, *hp.TNewDataOption().TWithElementType(eleType).TWithFieldName("array").TWithMaxCapacity(common.TestCapacity * 2))
-        _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, int64Column, vecColumn, arrayColumn))
+        _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, int64Column, vecColumn, arrayColumn))
         common.CheckErr(t, err, false, "array length exceeds max capacity")
     }
 }
@@ -270,7 +270,7 @@ func TestInsertNotExist(t *testing.T) {
     // insert data into not exist collection
     intColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeInt64, *hp.TNewDataOption())
-    _, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption("notExist", intColumn))
+    _, err := mc.Insert(ctx, client.NewColumnBasedInsertOption("notExist", intColumn))
     common.CheckErr(t, err, false, "can't find collection")
     // insert data into not exist partition
@@ -278,7 +278,7 @@ func TestInsertNotExist(t *testing.T) {
     _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, cp, hp.TNewFieldsOption(), hp.TNewSchemaOption())
     vecColumn := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, *hp.TNewDataOption().TWithDim(common.DefaultDim))
-    _, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, intColumn, vecColumn).WithPartition("aaa"))
+    _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, intColumn, vecColumn).WithPartition("aaa"))
     common.CheckErr(t, err, false, "partition not found")
 }
@ -300,19 +300,19 @@ func TestInsertColumnsMismatchFields(t *testing.T) {
collName := schema.CollectionName collName := schema.CollectionName
// len(column) < len(fields) // len(column) < len(fields)
_, errInsert := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, intColumn)) _, errInsert := mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, intColumn))
common.CheckErr(t, errInsert, false, "not passed") common.CheckErr(t, errInsert, false, "not passed")
// len(column) > len(fields) // len(column) > len(fields)
_, errInsert2 := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, intColumn, vecColumn, vecColumn)) _, errInsert2 := mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, intColumn, vecColumn, vecColumn))
common.CheckErr(t, errInsert2, false, "duplicated column") common.CheckErr(t, errInsert2, false, "duplicated column")
// //
_, errInsert3 := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, intColumn, floatColumn, vecColumn)) _, errInsert3 := mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, intColumn, floatColumn, vecColumn))
common.CheckErr(t, errInsert3, false, "does not exist in collection") common.CheckErr(t, errInsert3, false, "does not exist in collection")
// order(column) != order(fields) // order(column) != order(fields)
_, errInsert4 := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, vecColumn, intColumn)) _, errInsert4 := mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, vecColumn, intColumn))
common.CheckErr(t, errInsert4, true) common.CheckErr(t, errInsert4, true)
} }
@ -330,7 +330,7 @@ func TestInsertColumnsDifferentLen(t *testing.T) {
vecColumn := hp.GenColumnData(200, entity.FieldTypeFloatVector, *columnOpt) vecColumn := hp.GenColumnData(200, entity.FieldTypeFloatVector, *columnOpt)
// len(column) < len(fields) // len(column) < len(fields)
_, errInsert := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, intColumn, vecColumn)) _, errInsert := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, intColumn, vecColumn))
common.CheckErr(t, errInsert, false, "column size not match") common.CheckErr(t, errInsert, false, "column size not match")
} }
@ -346,17 +346,17 @@ func TestInsertInvalidColumn(t *testing.T) {
pkColumn := column.NewColumnInt64(common.DefaultInt64FieldName, []int64{}) pkColumn := column.NewColumnInt64(common.DefaultInt64FieldName, []int64{})
vecColumn := hp.GenColumnData(100, entity.FieldTypeFloatVector, *hp.TNewDataOption()) vecColumn := hp.GenColumnData(100, entity.FieldTypeFloatVector, *hp.TNewDataOption())
_, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn)) _, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn))
common.CheckErr(t, err, false, "need long int array][actual=got nil]") common.CheckErr(t, err, false, "need long int array][actual=got nil]")
// insert with empty vector data // insert with empty vector data
vecColumn2 := column.NewColumnFloatVector(common.DefaultFloatVecFieldName, common.DefaultDim, [][]float32{}) vecColumn2 := column.NewColumnFloatVector(common.DefaultFloatVecFieldName, common.DefaultDim, [][]float32{})
_, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn2)) _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn2))
common.CheckErr(t, err, false, "num_rows should be greater than 0") common.CheckErr(t, err, false, "num_rows should be greater than 0")
// insert with vector data dim not match // insert with vector data dim not match
vecColumnDim := column.NewColumnFloatVector(common.DefaultFloatVecFieldName, common.DefaultDim-8, [][]float32{}) vecColumnDim := column.NewColumnFloatVector(common.DefaultFloatVecFieldName, common.DefaultDim-8, [][]float32{})
_, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumnDim)) _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumnDim))
common.CheckErr(t, err, false, "vector dim 120 not match collection definition") common.CheckErr(t, err, false, "vector dim 120 not match collection definition")
} }
@ -378,7 +378,7 @@ func TestInsertColumnVarcharExceedLen(t *testing.T) {
pkColumn := column.NewColumnVarChar(common.DefaultVarcharFieldName, varcharValues) pkColumn := column.NewColumnVarChar(common.DefaultVarcharFieldName, varcharValues)
vecColumn := hp.GenColumnData(100, entity.FieldTypeBinaryVector, *hp.TNewDataOption()) vecColumn := hp.GenColumnData(100, entity.FieldTypeBinaryVector, *hp.TNewDataOption())
_, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn)) _, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, vecColumn))
common.CheckErr(t, err, false, "the length (12) of 0th VarChar varchar exceeds max length (0)%!(EXTRA int64=10)") common.CheckErr(t, err, false, "the length (12) of 0th VarChar varchar exceeds max length (0)%!(EXTRA int64=10)")
} }
@ -398,7 +398,7 @@ func TestInsertSparseData(t *testing.T) {
hp.GenColumnData(common.DefaultNb, entity.FieldTypeVarChar, *columnOpt), hp.GenColumnData(common.DefaultNb, entity.FieldTypeVarChar, *columnOpt),
hp.GenColumnData(common.DefaultNb, entity.FieldTypeSparseVector, *columnOpt.TWithSparseMaxLen(common.DefaultDim)), hp.GenColumnData(common.DefaultNb, entity.FieldTypeSparseVector, *columnOpt.TWithSparseMaxLen(common.DefaultDim)),
} }
inRes, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, columns...)) inRes, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, columns...))
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
common.CheckInsertResult(t, pkColumn, inRes) common.CheckInsertResult(t, pkColumn, inRes)
} }
@ -422,7 +422,7 @@ func TestInsertSparseDataMaxDim(t *testing.T) {
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
sparseColumn := column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec}) sparseColumn := column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec})
inRes, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, varcharColumn, sparseColumn)) inRes, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, pkColumn, varcharColumn, sparseColumn))
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
common.CheckInsertResult(t, pkColumn, inRes) common.CheckInsertResult(t, pkColumn, inRes)
} }
@ -453,7 +453,7 @@ func TestInsertSparseInvalidVector(t *testing.T) {
sparseVec, err := entity.NewSliceSparseEmbedding(positions, values) sparseVec, err := entity.NewSliceSparseEmbedding(positions, values)
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
data1 := append(data, column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec})) data1 := append(data, column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec}))
_, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, data1...)) _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, data1...))
common.CheckErr(t, err, false, "invalid index in sparse float vector: must be less than 2^32-1") common.CheckErr(t, err, false, "invalid index in sparse float vector: must be less than 2^32-1")
// invalid sparse vector: empty position and values // invalid sparse vector: empty position and values
@ -462,7 +462,7 @@ func TestInsertSparseInvalidVector(t *testing.T) {
sparseVec, err = entity.NewSliceSparseEmbedding(positions, values) sparseVec, err = entity.NewSliceSparseEmbedding(positions, values)
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
data2 := append(data, column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec})) data2 := append(data, column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec}))
_, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, data2...)) _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, data2...))
common.CheckErr(t, err, false, "empty sparse float vector row") common.CheckErr(t, err, false, "empty sparse float vector row")
} }
@ -484,7 +484,7 @@ func TestInsertSparseVectorSamePosition(t *testing.T) {
sparseVec, err := entity.NewSliceSparseEmbedding([]uint32{2, 10, 2}, []float32{0.4, 0.5, 0.6}) sparseVec, err := entity.NewSliceSparseEmbedding([]uint32{2, 10, 2}, []float32{0.4, 0.5, 0.6})
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
data = append(data, column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec})) data = append(data, column.NewColumnSparseVectors(common.DefaultSparseVecFieldName, []entity.SparseEmbedding{sparseVec}))
_, err = mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, data...)) _, err = mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, data...))
common.CheckErr(t, err, false, "unsorted or same indices in sparse float vector") common.CheckErr(t, err, false, "unsorted or same indices in sparse float vector")
} }
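The three sparse-vector hunks above pin down the validation rules through their expected error strings: indices must stay below 2^32-1, a row may not be empty, and positions must be sorted with no duplicates. Note that construction succeeds even for an invalid ordering (the tests assert err is nil after NewSliceSparseEmbedding); the server rejects the row at insert time. A minimal sketch of building one valid row with the same API (the field name and values are illustrative, not from this commit):

package sketch

import (
	"github.com/milvus-io/milvus/client/v2/column"
	"github.com/milvus-io/milvus/client/v2/entity"
)

// validSparseColumn returns a single-row sparse vector column. Positions are
// sorted ascending, unique, and below 2^32-1, and the row is non-empty, so
// it satisfies the server-side checks exercised above.
func validSparseColumn() (column.Column, error) {
	positions := []uint32{2, 10, 100}
	values := []float32{0.4, 0.5, 0.6}
	sparseVec, err := entity.NewSliceSparseEmbedding(positions, values)
	if err != nil {
		return nil, err // e.g. mismatched positions/values lengths
	}
	// Pass the column to client.NewColumnBasedInsertOption(collName, ...).
	return column.NewColumnSparseVectors("sparseVec", []entity.SparseEmbedding{sparseVec}), nil
}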
@@ -506,7 +506,7 @@ func TestInsertDefaultRows(t *testing.T) {
 		// insert rows
 		rows := hp.GenInt64VecRows(common.DefaultNb, false, autoId, *hp.TNewDataOption())
 		log.Info("rows data", zap.Any("rows[8]", rows[8]))
-		ids, err := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+		ids, err := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 		common.CheckErr(t, err, true)
 		if !autoId {
 			int64Values := make([]int64, 0, common.DefaultNb)
@@ -518,7 +518,7 @@ func TestInsertDefaultRows(t *testing.T) {
 		require.Equal(t, ids.InsertCount, int64(common.DefaultNb))

 		// flush and check row count
-		flushTask, errFlush := mc.Flush(ctx, clientv2.NewFlushOption(schema.CollectionName))
+		flushTask, errFlush := mc.Flush(ctx, client.NewFlushOption(schema.CollectionName))
 		common.CheckErr(t, errFlush, true)
 		errFlush = flushTask.Await(ctx)
 		common.CheckErr(t, errFlush, true)
@@ -541,7 +541,7 @@ func TestInsertAllFieldsRows(t *testing.T) {
 	rows := hp.GenAllFieldsRows(common.DefaultNb, false, *hp.TNewDataOption())
 	log.Debug("", zap.Any("row[0]", rows[0]))
 	log.Debug("", zap.Any("row", rows[1]))
-	ids, err := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+	ids, err := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 	common.CheckErr(t, err, true)

 	int64Values := make([]int64, 0, common.DefaultNb)
@@ -551,7 +551,7 @@ func TestInsertAllFieldsRows(t *testing.T) {
 	common.CheckInsertResult(t, column.NewColumnInt64(common.DefaultInt64FieldName, int64Values), ids)

 	// flush and check row count
-	flushTask, errFlush := mc.Flush(ctx, clientv2.NewFlushOption(schema.CollectionName))
+	flushTask, errFlush := mc.Flush(ctx, client.NewFlushOption(schema.CollectionName))
 	common.CheckErr(t, errFlush, true)
 	errFlush = flushTask.Await(ctx)
 	common.CheckErr(t, errFlush, true)
@@ -572,7 +572,7 @@ func TestInsertVarcharRows(t *testing.T) {
 		// insert rows
 		rows := hp.GenInt64VarcharSparseRows(common.DefaultNb, false, autoId, *hp.TNewDataOption().TWithSparseMaxLen(1000))
-		ids, err := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+		ids, err := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 		common.CheckErr(t, err, true)

 		int64Values := make([]int64, 0, common.DefaultNb)
@@ -582,7 +582,7 @@ func TestInsertVarcharRows(t *testing.T) {
 		common.CheckInsertResult(t, column.NewColumnInt64(common.DefaultInt64FieldName, int64Values), ids)

 		// flush and check row count
-		flushTask, errFlush := mc.Flush(ctx, clientv2.NewFlushOption(schema.CollectionName))
+		flushTask, errFlush := mc.Flush(ctx, client.NewFlushOption(schema.CollectionName))
 		common.CheckErr(t, errFlush, true)
 		errFlush = flushTask.Await(ctx)
 		common.CheckErr(t, errFlush, true)
@@ -597,7 +597,7 @@ func TestInsertSparseRows(t *testing.T) {
 	sparseField := entity.NewField().WithName(common.DefaultSparseVecFieldName).WithDataType(entity.FieldTypeSparseVector)
 	collName := common.GenRandomString("insert", 6)
 	schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(sparseField)
-	err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+	err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
 	common.CheckErr(t, err, true)

 	// prepare rows
@@ -613,7 +613,7 @@ func TestInsertSparseRows(t *testing.T) {
 		}
 		rows = append(rows, &baseRow)
 	}
-	ids, err := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+	ids, err := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 	common.CheckErr(t, err, true)

 	int64Values := make([]int64, 0, common.DefaultNb)
@@ -623,7 +623,7 @@ func TestInsertSparseRows(t *testing.T) {
 	common.CheckInsertResult(t, column.NewColumnInt64(common.DefaultInt64FieldName, int64Values), ids)

 	// flush and check row count
-	flushTask, errFlush := mc.Flush(ctx, clientv2.NewFlushOption(schema.CollectionName))
+	flushTask, errFlush := mc.Flush(ctx, client.NewFlushOption(schema.CollectionName))
 	common.CheckErr(t, errFlush, true)
 	errFlush = flushTask.Await(ctx)
 	common.CheckErr(t, errFlush, true)
@@ -639,12 +639,12 @@ func TestInsertRowFieldNameNotMatch(t *testing.T) {
 	int64Field := entity.NewField().WithName("pk").WithDataType(entity.FieldTypeInt64).WithIsPrimaryKey(true)
 	collName := common.GenRandomString(prefix, 6)
 	schema := entity.NewSchema().WithName(collName).WithField(int64Field).WithField(vecField)
-	err := mc.CreateCollection(ctx, clientv2.NewCreateCollectionOption(collName, schema))
+	err := mc.CreateCollection(ctx, client.NewCreateCollectionOption(collName, schema))
 	common.CheckErr(t, err, true)

 	// insert rows, with json key name: int64
 	rows := hp.GenInt64VecRows(10, false, false, *hp.TNewDataOption())
-	_, errInsert := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+	_, errInsert := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 	common.CheckErr(t, errInsert, false, "row 0 does not has field pk")
 }
@@ -664,7 +664,7 @@ func TestInsertRowMismatchFields(t *testing.T) {
 		}
 		rowsLess = append(rowsLess, row)
 	}
-	_, errInsert := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rowsLess...))
+	_, errInsert := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rowsLess...))
 	common.CheckErr(t, errInsert, false, "[expected=need float vector][actual=got nil]")

 	/*
@@ -680,7 +680,7 @@ func TestInsertRowMismatchFields(t *testing.T) {
 			rowsMore = append(rowsMore, row)
 		}
 		log.Debug("Row data", zap.Any("row[0]", rowsMore[0]))
-		_, errInsert = mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rowsMore...))
+		_, errInsert = mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rowsMore...))
 		common.CheckErr(t, errInsert, false, "")
 	*/
@@ -694,7 +694,7 @@ func TestInsertRowMismatchFields(t *testing.T) {
 		rowsOrder = append(rowsOrder, row)
 	}
 	log.Debug("Row data", zap.Any("row[0]", rowsOrder[0]))
-	_, errInsert = mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rowsOrder...))
+	_, errInsert = mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rowsOrder...))
 	common.CheckErr(t, errInsert, true)
 }
@@ -711,7 +711,7 @@ func TestInsertAutoIDInvalidRow(t *testing.T) {
 		// insert rows: autoId true -> o pk data; autoID false -> has pk data
 		rows := hp.GenInt64VecRows(10, false, !autoId, *hp.TNewDataOption())
 		log.Info("rows data", zap.Any("rows[8]", rows[0]))
-		_, err := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+		_, err := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 		common.CheckErr(t, err, false, "missing pk data")
 	}
 }
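The hunk set above is one mechanical rename: the client/v2 module declares package client, so the clientv2 import alias was redundant and every clientv2.NewXxxOption call site becomes client.NewXxxOption with identical arguments. A minimal before/after sketch of the pattern (the collection name and column are placeholders, and client.Client is assumed to be the v2 client type):

package sketch

import (
	"context"

	// before: clientv2 "github.com/milvus-io/milvus/client/v2"
	// after: no alias needed, the imported package is already named "client"
	"github.com/milvus-io/milvus/client/v2"
	"github.com/milvus-io/milvus/client/v2/column"
)

// insertSketch mirrors the renamed call sites: only the package qualifier
// changes, the option-builder API itself is untouched.
func insertSketch(ctx context.Context, mc *client.Client, collName string) error {
	pkColumn := column.NewColumnInt64("int64", []int64{1, 2, 3})
	// before: mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(collName, pkColumn))
	_, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(collName, pkColumn))
	return err
}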

View File

@@ -8,7 +8,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"

-	clientv2 "github.com/milvus-io/milvus/client/v2"
+	"github.com/milvus-io/milvus/client/v2"
 	"github.com/milvus-io/milvus/client/v2/column"
 	"github.com/milvus-io/milvus/client/v2/entity"
 	"github.com/milvus-io/milvus/pkg/log"
@@ -32,7 +32,7 @@ func TestQueryDefault(t *testing.T) {
 	// query
 	expr := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, 100)
-	queryRes, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr))
+	queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{insertRes.IDs.Slice(0, 100)})
 }
@@ -52,7 +52,7 @@ func TestQueryVarcharPkDefault(t *testing.T) {
 	// query
 	expr := fmt.Sprintf("%s in ['0', '1', '2', '3', '4']", common.DefaultVarcharFieldName)
-	queryRes, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr))
+	queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{insertRes.IDs.Slice(0, 5)})
 }
@@ -64,7 +64,7 @@ func TestQueryNotExistName(t *testing.T) {
 	// query with not existed collection
 	expr := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, 100)
-	_, errCol := mc.Query(ctx, clientv2.NewQueryOption("aaa").WithFilter(expr))
+	_, errCol := mc.Query(ctx, client.NewQueryOption("aaa").WithFilter(expr))
 	common.CheckErr(t, errCol, false, "can't find collection")

 	// create -> index -> load
@@ -73,7 +73,7 @@ func TestQueryNotExistName(t *testing.T) {
 	prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))

 	// query with not existed partition
-	_, errPar := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{"aaa"}))
+	_, errPar := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{"aaa"}))
 	common.CheckErr(t, errPar, false, "partition name aaa not found")
 }
@@ -88,7 +88,7 @@ func TestQueryInvalidPartitionName(t *testing.T) {
 	expr := fmt.Sprintf("%s >= %d", common.DefaultInt64FieldName, 0)
 	emptyPartitionName := ""
 	// query from "" partitions, expect to query from default partition
-	_, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{emptyPartitionName}))
+	_, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{emptyPartitionName}))
 	common.CheckErr(t, err, false, "Partition name should not be empty")
 }
@@ -101,7 +101,7 @@ func TestQueryPartition(t *testing.T) {
 	// create collection and partition
 	prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(true))
-	err := mc.CreatePartition(ctx, clientv2.NewCreatePartitionOption(schema.CollectionName, parName))
+	err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
 	common.CheckErr(t, err, true)

 	// insert [0, 3000) into default, insert [3000, 6000) into parName
@@ -117,27 +117,27 @@ func TestQueryPartition(t *testing.T) {
 	expColumn := hp.GenColumnData(common.DefaultNb*2, entity.FieldTypeInt64, *hp.TNewDataOption().TWithStart(0))

 	// query with default params, expect to query from all partitions
-	queryRes, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
+	queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{expColumn})

 	// query with empty partition names
-	queryRes, err = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{}).WithConsistencyLevel(entity.ClStrong))
+	queryRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{}).WithConsistencyLevel(entity.ClStrong))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{expColumn})

 	// query with default partition
-	queryRes, err = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{common.DefaultPartition}).WithConsistencyLevel(entity.ClStrong))
+	queryRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{common.DefaultPartition}).WithConsistencyLevel(entity.ClStrong))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{i1Res.IDs})

 	// query with specify partition
-	queryRes, err = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{parName}).WithConsistencyLevel(entity.ClStrong))
+	queryRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{parName}).WithConsistencyLevel(entity.ClStrong))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{i2Res.IDs})

 	// query with all partitions
-	queryRes, err = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{common.DefaultPartition, parName}).WithConsistencyLevel(entity.ClStrong))
+	queryRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithPartitions([]string{common.DefaultPartition, parName}).WithConsistencyLevel(entity.ClStrong))
 	common.CheckErr(t, err, true)
 	common.CheckQueryResult(t, queryRes.Fields, []column.Column{expColumn})
 }
@@ -151,11 +151,11 @@ func TestQueryWithoutExpr(t *testing.T) {
 	_, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())

 	// query without expr
-	_, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName))
+	_, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName))
 	common.CheckErr(t, err, false, "empty expression should be used with limit")

 	// query with empty expr
-	_, err = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(""))
+	_, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(""))
 	common.CheckErr(t, err, false, "empty expression should be used with limit")
 }
@@ -181,12 +181,12 @@ func TestQueryOutputFields(t *testing.T) {
 		expr := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, 10)

 		// query with empty output fields []string{}-> output "int64"
-		queryNilOutputs, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{}))
+		queryNilOutputs, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{}))
 		common.CheckErr(t, err, true)
 		common.CheckOutputFields(t, []string{common.DefaultInt64FieldName}, queryNilOutputs.Fields)

 		// query with empty output fields []string{""}-> output "int64" and dynamic field
-		_, err1 := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{""}))
+		_, err1 := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{""}))
 		if enableDynamic {
 			common.CheckErr(t, err1, false, "parse output field name failed")
 		} else {
@@ -195,7 +195,7 @@ func TestQueryOutputFields(t *testing.T) {
 		// query with not existed field -> output field as dynamic or error
 		fakeName := "aaa"
-		res2, err2 := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{fakeName}))
+		res2, err2 := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{fakeName}))
 		if enableDynamic {
 			common.CheckErr(t, err2, true)
 			for _, c := range res2.Fields {
@@ -213,7 +213,7 @@ func TestQueryOutputFields(t *testing.T) {
 		}

 		// query with part not existed field ["aa", "$meat"]: error or as dynamic field
-		res3, err3 := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{fakeName, common.DefaultDynamicFieldName}))
+		res3, err3 := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{fakeName, common.DefaultDynamicFieldName}))
 		if enableDynamic {
 			common.CheckErr(t, err3, true)
 			common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, fakeName, common.DefaultDynamicFieldName}, res3.Fields)
@@ -222,7 +222,7 @@ func TestQueryOutputFields(t *testing.T) {
 		}

 		// query with repeated field: ["*", "$meat"], ["floatVec", floatVec"] unique field
-		res4, err4 := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{"*", common.DefaultDynamicFieldName}))
+		res4, err4 := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{"*", common.DefaultDynamicFieldName}))
 		if enableDynamic {
 			common.CheckErr(t, err4, true)
 			common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, common.DefaultFloatVecFieldName, common.DefaultDynamicFieldName}, res4.Fields)
@@ -230,7 +230,7 @@ func TestQueryOutputFields(t *testing.T) {
 			common.CheckErr(t, err4, false, "$meta not exist")
 		}

-		res5, err5 := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields(
+		res5, err5 := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields(
 			[]string{common.DefaultFloatVecFieldName, common.DefaultFloatVecFieldName, common.DefaultInt64FieldName}))
 		common.CheckErr(t, err5, true)
 		common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, common.DefaultFloatVecFieldName}, res5.Fields)
@@ -239,7 +239,6 @@ func TestQueryOutputFields(t *testing.T) {
 // test query output all fields and verify data
 func TestQueryOutputAllFieldsColumn(t *testing.T) {
-	t.Skip("https://github.com/milvus-io/milvus/issues/33848")
 	ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
 	mc := createDefaultMilvusClient(ctx, t)
@@ -253,7 +252,7 @@ func TestQueryOutputAllFieldsColumn(t *testing.T) {
 	columns := make([]column.Column, 0, len(schema.Fields)+1)
 	dynamicColumns := hp.GenDynamicColumnData(0, common.DefaultNb)
 	genDataOpt := hp.TNewDataOption().TWithMaxCapacity(common.TestCapacity)
-	insertOpt := clientv2.NewColumnBasedInsertOption(schema.CollectionName)
+	insertOpt := client.NewColumnBasedInsertOption(schema.CollectionName)
 	for _, field := range schema.Fields {
 		if field.DataType == entity.FieldTypeArray {
 			genDataOpt.TWithElementType(field.ElementType)
@@ -277,7 +276,7 @@ func TestQueryOutputAllFieldsColumn(t *testing.T) {
 	if isDynamic {
 		allFieldsName = append(allFieldsName, common.DefaultDynamicFieldName)
 	}
-	queryResultAll, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).
+	queryResultAll, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).
 		WithFilter(fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, pos)).WithOutputFields([]string{"*"}))
 	common.CheckErr(t, errQuery, true)
 	common.CheckOutputFields(t, allFieldsName, queryResultAll.Fields)
@@ -305,7 +304,7 @@ func TestQueryOutputAllFieldsRows(t *testing.T) {
 	// prepare and insert data
 	rows := hp.GenAllFieldsRows(common.DefaultNb, false, *hp.TNewDataOption().TWithMaxCapacity(common.TestCapacity))
-	ids, err := mc.Insert(ctx, clientv2.NewRowBasedInsertOption(schema.CollectionName, rows...))
+	ids, err := mc.Insert(ctx, client.NewRowBasedInsertOption(schema.CollectionName, rows...))
 	common.CheckErr(t, err, true)
 	require.Equal(t, int64(common.DefaultNb), ids.InsertCount)
@@ -318,7 +317,7 @@ func TestQueryOutputAllFieldsRows(t *testing.T) {
 	for _, field := range schema.Fields {
 		allFieldsName = append(allFieldsName, field.Name)
 	}
-	queryResultAll, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).
+	queryResultAll, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).
 		WithFilter(fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, 10)).WithOutputFields([]string{"*"}))
 	common.CheckErr(t, errQuery, true)
 	common.CheckOutputFields(t, allFieldsName, queryResultAll.Fields)
@@ -340,7 +339,7 @@ func TestQueryOutputBinaryAndVarchar(t *testing.T) {
 	for _, field := range schema.Fields {
 		columns = append(columns, hp.GenColumnData(common.DefaultNb, field.DataType, *hp.TNewDataOption().TWithDim(common.DefaultDim)))
 	}
-	ids, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, columns...).WithColumns(dynamicColumns...))
+	ids, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, columns...).WithColumns(dynamicColumns...))
 	common.CheckErr(t, err, true)
 	require.Equal(t, int64(common.DefaultNb), ids.InsertCount)
 	prepare.FlushData(ctx, t, mc, schema.CollectionName)
@@ -348,7 +347,7 @@ func TestQueryOutputBinaryAndVarchar(t *testing.T) {
 	// query output all fields -> output all fields, includes vector and $meta field
 	expr := fmt.Sprintf("%s in ['0', '1', '2', '3', '4', '5'] ", common.DefaultVarcharFieldName)
 	allFieldsName := []string{common.DefaultVarcharFieldName, common.DefaultBinaryVecFieldName, common.DefaultDynamicFieldName}
-	queryResultAll, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).
+	queryResultAll, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).
 		WithFilter(expr).WithOutputFields([]string{"*"}))
 	common.CheckErr(t, errQuery, true)
 	common.CheckOutputFields(t, allFieldsName, queryResultAll.Fields)
@@ -376,7 +375,7 @@ func TestQueryOutputSparse(t *testing.T) {
 		columns = append(columns, hp.GenColumnData(common.DefaultNb, field.DataType, *hp.TNewDataOption().TWithSparseMaxLen(10)))
 	}
-	ids, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, columns...))
+	ids, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, columns...))
 	common.CheckErr(t, err, true)
 	require.Equal(t, int64(common.DefaultNb), ids.InsertCount)
 	prepare.FlushData(ctx, t, mc, schema.CollectionName)
@@ -384,7 +383,7 @@ func TestQueryOutputSparse(t *testing.T) {
 	// query output all fields -> output all fields, includes vector and $meta field
 	expr := fmt.Sprintf("%s < 100 ", common.DefaultInt64FieldName)
 	expFieldsName := []string{common.DefaultInt64FieldName, common.DefaultVarcharFieldName, common.DefaultSparseVecFieldName}
-	queryResultAll, errQuery := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{"*"}))
+	queryResultAll, errQuery := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{"*"}))
 	common.CheckErr(t, errQuery, true)
 	common.CheckOutputFields(t, expFieldsName, queryResultAll.Fields)
@@ -416,7 +415,7 @@ func TestQueryArrayDifferentLenBetweenRows(t *testing.T) {
 			}
 			columns = append(columns, hp.GenColumnData(common.DefaultNb, field.DataType, *genDataOpt))
 		}
-		ids, err := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName, columns...))
+		ids, err := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName, columns...))
 		common.CheckErr(t, err, true)
 		require.Equal(t, int64(common.DefaultNb), ids.InsertCount)
 	}
@@ -424,19 +423,19 @@ func TestQueryArrayDifferentLenBetweenRows(t *testing.T) {
 	// query array idx exceeds max capacity, array[200]
 	expr := fmt.Sprintf("%s[%d] > 0", common.DefaultInt64ArrayField, common.TestCapacity*2)
-	countRes, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{common.QueryCountFieldName}))
+	countRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{common.QueryCountFieldName}))
 	common.CheckErr(t, err, true)
 	count, _ := countRes.Fields[0].GetAsInt64(0)
 	require.Equal(t, int64(0), count)

-	countRes, err = mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{"Count(*)"}))
+	countRes, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr).WithOutputFields([]string{"Count(*)"}))
 	common.CheckErr(t, err, true)
 	count, _ = countRes.Fields[0].GetAsInt64(0)
 	require.Equal(t, int64(0), count)

 	// query: some rows has element greater than expr index array[100]
 	expr2 := fmt.Sprintf("%s[%d] > 0", common.DefaultInt64ArrayField, common.TestCapacity)
-	countRes2, err2 := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr2).WithOutputFields([]string{common.QueryCountFieldName}))
+	countRes2, err2 := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(expr2).WithOutputFields([]string{common.QueryCountFieldName}))
 	common.CheckErr(t, err2, true)
 	count2, _ := countRes2.Fields[0].GetAsInt64(0)
 	require.Equal(t, int64(common.DefaultNb), count2)
@@ -456,7 +455,7 @@ func TestQueryJsonDynamicExpr(t *testing.T) {
 	// query with different expr and count
 	expr := fmt.Sprintf("%s['number'] < 10 || %s < 10", common.DefaultJSONFieldName, common.DefaultDynamicNumberField)
-	queryRes, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong).
+	queryRes, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong).
 		WithOutputFields([]string{common.DefaultJSONFieldName, common.DefaultDynamicFieldName}))

 	// verify output fields and count, dynamicNumber value
@@ -487,7 +486,7 @@ func TestQueryInvalidExpr(t *testing.T) {
 	prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))

 	for _, _invalidExpr := range common.InvalidExpressions {
-		_, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(_invalidExpr.Expr))
+		_, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(_invalidExpr.Expr))
 		common.CheckErr(t, err, _invalidExpr.ErrNil, _invalidExpr.ErrMsg)
 	}
 }
@@ -558,7 +557,7 @@ func TestQueryCountJsonDynamicExpr(t *testing.T) {
 	for _, _exprCount := range exprCounts {
 		log.Debug("TestQueryCountJsonDynamicExpr", zap.String("expr", _exprCount.expr))
-		countRes, _ := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(_exprCount.expr).WithOutputFields([]string{common.QueryCountFieldName}))
+		countRes, _ := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(_exprCount.expr).WithOutputFields([]string{common.QueryCountFieldName}))
 		count, _ := countRes.Fields[0].GetAsInt64(0)
 		require.Equal(t, _exprCount.count, count)
 	}
@@ -601,7 +600,7 @@ func TestQueryArrayFieldExpr(t *testing.T) {
 	for _, _exprCount := range exprCounts {
 		log.Debug("TestQueryCountJsonDynamicExpr", zap.String("expr", _exprCount.expr))
-		countRes, _ := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(_exprCount.expr).WithOutputFields([]string{common.QueryCountFieldName}))
+		countRes, _ := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(_exprCount.expr).WithOutputFields([]string{common.QueryCountFieldName}))
 		count, _ := countRes.Fields[0].GetAsInt64(0)
 		require.Equal(t, _exprCount.count, count)
 	}
@@ -633,7 +632,7 @@ func TestQueryOutputInvalidOutputFieldCount(t *testing.T) {
 		queryExpr := fmt.Sprintf("%s >= 0", common.DefaultInt64FieldName)

 		// query with empty output fields []string{}-> output "int64"
-		_, err := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(queryExpr).WithOutputFields([]string{invalidCount.countField}))
+		_, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithConsistencyLevel(entity.ClStrong).WithFilter(queryExpr).WithOutputFields([]string{invalidCount.countField}))
 		common.CheckErr(t, err, false, invalidCount.errMsg)
 	}
 }
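The query hunks above repeat one option-builder chain: construct the option from the collection name, then chain filter, consistency level, partitions, and output fields. A minimal count(*) sketch of that chain under the renamed import (the filter is a placeholder, and client.Client is assumed to be the v2 client type):

package sketch

import (
	"context"

	"github.com/milvus-io/milvus/client/v2"
	"github.com/milvus-io/milvus/client/v2/entity"
)

// countRows mirrors the Count(*) queries above: the count comes back as the
// first (and only) field of the result set.
func countRows(ctx context.Context, mc *client.Client, collName, expr string) (int64, error) {
	countRes, err := mc.Query(ctx, client.NewQueryOption(collName).
		WithFilter(expr).
		WithConsistencyLevel(entity.ClStrong).
		WithOutputFields([]string{"Count(*)"}))
	if err != nil {
		return 0, err
	}
	return countRes.Fields[0].GetAsInt64(0)
}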

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/zap" "go.uber.org/zap"
clientv2 "github.com/milvus-io/milvus/client/v2" "github.com/milvus-io/milvus/client/v2"
"github.com/milvus-io/milvus/client/v2/column" "github.com/milvus-io/milvus/client/v2/column"
"github.com/milvus-io/milvus/client/v2/entity" "github.com/milvus-io/milvus/client/v2/entity"
"github.com/milvus-io/milvus/client/v2/index" "github.com/milvus-io/milvus/client/v2/index"
@ -31,7 +31,7 @@ func TestSearchDefault(t *testing.T) {
// search // search
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector) vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
resSearch, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong)) resSearch, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit) common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit)
} }
@ -48,7 +48,7 @@ func TestSearchDefaultGrowing(t *testing.T) {
// search // search
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBinaryVector) vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBinaryVector)
resSearch, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong)) resSearch, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true) common.CheckErr(t, err, true)
common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit) common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit)
} }
@ -60,20 +60,20 @@ func TestSearchInvalidCollectionPartitionName(t *testing.T) {
// search with not exist collection // search with not exist collection
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector) vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
_, err := mc.Search(ctx, clientv2.NewSearchOption("aaa", common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong)) _, err := mc.Search(ctx, client.NewSearchOption("aaa", common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, false, "can't find collection") common.CheckErr(t, err, false, "can't find collection")
// search with empty collections name // search with empty collections name
_, err = mc.Search(ctx, clientv2.NewSearchOption("", common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong)) _, err = mc.Search(ctx, client.NewSearchOption("", common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, false, "collection name should not be empty") common.CheckErr(t, err, false, "collection name should not be empty")
// search with not exist partition // search with not exist partition
_, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.VarcharBinary), hp.TNewFieldsOption(), hp.TNewSchemaOption()) _, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.VarcharBinary), hp.TNewFieldsOption(), hp.TNewSchemaOption())
_, err1 := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithPartitions([]string{"aaa"})) _, err1 := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithPartitions([]string{"aaa"}))
common.CheckErr(t, err1, false, "partition name aaa not found") common.CheckErr(t, err1, false, "partition name aaa not found")
// search with empty partition name []string{""} -> error // search with empty partition name []string{""} -> error
- _, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).
+ _, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).
WithConsistencyLevel(entity.ClStrong).WithANNSField(common.DefaultFloatVecFieldName).WithPartitions([]string{""}))
common.CheckErr(t, errSearch, false, "Partition name should not be empty")
}
@@ -101,7 +101,7 @@ func TestSearchEmptyCollection(t *testing.T) {
{fieldName: common.DefaultBFloat16VecFieldName, queryVec: hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBFloat16Vector)},
{fieldName: common.DefaultBinaryVecFieldName, queryVec: hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBinaryVector)},
} {
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, _mNameVec.queryVec).
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, _mNameVec.queryVec).
WithConsistencyLevel(entity.ClStrong).WithANNSField(_mNameVec.fieldName))
common.CheckErr(t, errSearch, true)
t.Log("https://github.com/milvus-io/milvus/issues/33952")
@@ -121,7 +121,7 @@ func TestSearchEmptySparseCollection(t *testing.T) {
// search
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeSparseVector)
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).
WithConsistencyLevel(entity.ClStrong).WithANNSField(common.DefaultSparseVecFieldName))
common.CheckErr(t, errSearch, true)
t.Log("https://github.com/milvus-io/milvus/issues/33952")
@@ -137,17 +137,17 @@ func TestSearchPartitions(t *testing.T) {
// create collection and partition
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption().TWithAutoID(true),
hp.TNewSchemaOption().TWithEnableDynamicField(true))
- err := mc.CreatePartition(ctx, clientv2.NewCreatePartitionOption(schema.CollectionName, parName))
+ err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
common.CheckErr(t, err, true)
// insert autoID data into parName and _default partitions
_defVec := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, *hp.TNewDataOption())
_defDynamic := hp.GenDynamicColumnData(0, common.DefaultNb)
- insertRes1, err1 := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(_defVec).WithColumns(_defDynamic...))
+ insertRes1, err1 := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(_defVec).WithColumns(_defDynamic...))
common.CheckErr(t, err1, true)
_parVec := hp.GenColumnData(common.DefaultNb, entity.FieldTypeFloatVector, *hp.TNewDataOption())
- insertRes2, err2 := mc.Insert(ctx, clientv2.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(_parVec))
+ insertRes2, err2 := mc.Insert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(_parVec))
common.CheckErr(t, err2, true)
// flush -> FLAT index -> load
@@ -160,7 +160,7 @@ func TestSearchPartitions(t *testing.T) {
// query first ID of _default and parName partition
_defId0, _ := insertRes1.IDs.GetAsInt64(0)
_parId0, _ := insertRes2.IDs.GetAsInt64(0)
- queryRes, _ := mc.Query(ctx, clientv2.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("int64 in [%d, %d]", _defId0, _parId0)).WithOutputFields([]string{"*"}))
+ queryRes, _ := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("int64 in [%d, %d]", _defId0, _parId0)).WithOutputFields([]string{"*"}))
require.ElementsMatch(t, []int64{_defId0, _parId0}, queryRes.GetColumn(common.DefaultInt64FieldName).(*column.ColumnInt64).Data())
for _, vec := range queryRes.GetColumn(common.DefaultFloatVecFieldName).(*column.ColumnFloatVector).Data() {
vectors = append(vectors, entity.FloatVector(vec))
@@ -168,7 +168,7 @@ func TestSearchPartitions(t *testing.T) {
for _, partitions := range [][]string{{}, {common.DefaultPartition, parName}} {
// search with empty partition names slice []string{} -> all partitions
- searchResult, errSearch1 := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, 5, vectors).
+ searchResult, errSearch1 := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, 5, vectors).
WithConsistencyLevel(entity.ClStrong).WithANNSField(common.DefaultFloatVecFieldName).WithPartitions(partitions).WithOutputFields([]string{"*"}))
// check search result contains search vector, which from all partitions
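// a minimal sketch (collection name assumed) of the WithPartitions semantics
// exercised above: an empty slice searches every partition, while a non-empty
// slice restricts the search to the named partitions.
//
//	all, _ := mc.Search(ctx, client.NewSearchOption("coll", 5, vectors).
//		WithPartitions([]string{})) // empty slice -> all partitions
//	sub, _ := mc.Search(ctx, client.NewSearchOption("coll", 5, vectors).
//		WithPartitions([]string{common.DefaultPartition, "p1"})) // named partitions only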
@@ -196,12 +196,12 @@ func TestSearchEmptyOutputFields(t *testing.T) {
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
- resSearch, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{}))
+ resSearch, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{}))
common.CheckErr(t, err, true)
common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit)
common.CheckOutputFields(t, []string{}, resSearch[0].Fields)
- _, err = mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{""}))
+ _, err = mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{""}))
if dynamic {
common.CheckErr(t, err, false, "parse output field name failed")
} else {
@@ -238,7 +238,7 @@ func TestSearchNotExistOutputFields(t *testing.T) {
}
for _, _dof := range dof {
- resSearch, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields(_dof.outputFields))
+ resSearch, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields(_dof.outputFields))
if enableDynamic {
common.CheckErr(t, err, true)
common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit)
@@ -248,7 +248,7 @@ func TestSearchNotExistOutputFields(t *testing.T) {
}
}
existedRepeatedFields := []string{common.DefaultInt64FieldName, common.DefaultFloatVecFieldName, common.DefaultInt64FieldName, common.DefaultFloatVecFieldName}
- resSearch2, err2 := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields(existedRepeatedFields))
+ resSearch2, err2 := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields(existedRepeatedFields))
common.CheckErr(t, err2, true)
common.CheckSearchResult(t, resSearch2, common.DefaultNq, common.DefaultLimit)
common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, common.DefaultFloatVecFieldName}, resSearch2[0].Fields)
@@ -274,7 +274,7 @@ func TestSearchOutputAllFields(t *testing.T) {
}
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
- searchRes, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithANNSField(common.DefaultFloatVecFieldName).WithOutputFields([]string{"*"}))
common.CheckErr(t, err, true)
common.CheckSearchResult(t, searchRes, common.DefaultNq, common.DefaultLimit)
@@ -301,7 +301,7 @@ func TestSearchOutputBinaryPk(t *testing.T) {
allFieldsName = append(allFieldsName, field.Name)
}
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeBinaryVector)
- searchRes, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{"*"}))
+ searchRes, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{"*"}))
common.CheckErr(t, err, true)
common.CheckSearchResult(t, searchRes, common.DefaultNq, common.DefaultLimit)
for _, res := range searchRes {
@@ -327,7 +327,7 @@ func TestSearchOutputSparse(t *testing.T) {
allFieldsName = append(allFieldsName, field.Name)
}
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeSparseVector)
- searchRes, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithANNSField(common.DefaultSparseVecFieldName).WithOutputFields([]string{"*"}))
common.CheckErr(t, err, true)
common.CheckSearchResult(t, searchRes, common.DefaultNq, common.DefaultLimit)
@@ -372,7 +372,7 @@ func TestSearchInvalidVectorField(t *testing.T) {
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeSparseVector)
for _, invalidVectorField := range invalidVectorFields {
- _, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithANNSField(invalidVectorField.vectorField))
+ _, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithANNSField(invalidVectorField.vectorField))
common.CheckErr(t, err, invalidVectorField.errNil, invalidVectorField.errMsg)
}
}
@@ -412,7 +412,7 @@ func TestSearchInvalidVectors(t *testing.T) {
}
for _, invalidVector := range invalidVectors {
- _, errSearchEmpty := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, invalidVector.vectors).WithANNSField(invalidVector.fieldName))
+ _, errSearchEmpty := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, invalidVector.vectors).WithANNSField(invalidVector.fieldName))
common.CheckErr(t, errSearchEmpty, false, invalidVector.errMsg)
}
}
@@ -448,7 +448,7 @@ func TestSearchEmptyInvalidVectors(t *testing.T) {
}
for _, invalidVector := range invalidVectors {
- _, errSearchEmpty := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, invalidVector.vectors).WithANNSField(common.DefaultFloatVecFieldName))
+ _, errSearchEmpty := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, invalidVector.vectors).WithANNSField(common.DefaultFloatVecFieldName))
common.CheckErr(t, errSearchEmpty, invalidVector.errNil, invalidVector.errMsg)
}
}
@@ -467,7 +467,7 @@ func TestSearchNotMatchMetricType(t *testing.T) {
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
- _, errSearchEmpty := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors))
+ _, errSearchEmpty := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors))
common.CheckErr(t, errSearchEmpty, false, "metric type not match: invalid parameter")
}
@@ -484,7 +484,7 @@ func TestSearchInvalidTopK(t *testing.T) {
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
for _, invalidTopK := range []int{-1, 0, 16385} {
- _, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, invalidTopK, vectors))
+ _, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, invalidTopK, vectors))
common.CheckErr(t, errSearch, false, "should be in range [1, 16384]")
}
}
@@ -502,7 +502,7 @@ func TestSearchInvalidOffset(t *testing.T) {
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
for _, invalidOffset := range []int{-1, common.MaxTopK + 1} {
- _, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithOffset(invalidOffset))
+ _, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithOffset(invalidOffset))
common.CheckErr(t, errSearch, false, "should be in range [1, 16384]")
}
}
@@ -526,7 +526,7 @@ func TestSearchEfHnsw(t *testing.T) {
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
- _, err := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors))
+ _, err := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors))
common.CheckErr(t, err, false, "ef(7) should be larger than k(10)")
}
@@ -545,7 +545,7 @@ func TestSearchInvalidScannReorderK(t *testing.T) {
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, 500), hp.TNewDataOption())
prepare.FlushData(ctx, t, mc, schema.CollectionName)
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema).TWithFieldIndex(map[string]index.Index{
- common.DefaultFloatVecFieldName: index.NewSCANNIndex(entity.COSINE, 16, false),
+ common.DefaultFloatVecFieldName: index.NewSCANNIndex(entity.COSINE, 16, true),
}))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
@@ -573,7 +573,7 @@ func TestSearchScannAllMetricsWithRawData(t *testing.T) {
// search and output all fields
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{"*"}))
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).WithOutputFields([]string{"*"}))
common.CheckErr(t, errSearch, true)
common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, common.DefaultFloatFieldName,
common.DefaultJSONFieldName, common.DefaultFloatVecFieldName, common.DefaultDynamicFieldName}, resSearch[0].Fields)
@@ -603,7 +603,7 @@ func TestSearchExpr(t *testing.T) {
{expr: fmt.Sprintf("%s < 10", common.DefaultInt64FieldName), ids: []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
{expr: fmt.Sprintf("%s in [10, 100]", common.DefaultInt64FieldName), ids: []int64{10, 100}},
} {
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(_mExpr.expr))
common.CheckErr(t, errSearch, true)
for _, res := range resSearch {
@@ -629,7 +629,7 @@ func TestSearchInvalidExpr(t *testing.T) {
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
for _, exprStruct := range common.InvalidExpressions {
log.Debug("TestSearchInvalidExpr", zap.String("expr", exprStruct.Expr))
- _, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ _, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(exprStruct.Expr).WithANNSField(common.DefaultFloatVecFieldName))
common.CheckErr(t, errSearch, exprStruct.ErrNil, exprStruct.ErrMsg)
}
@@ -674,7 +674,7 @@ func TestSearchJsonFieldExpr(t *testing.T) {
for _, expr := range exprs {
log.Debug("TestSearchJsonFieldExpr", zap.String("expr", expr))
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
- searchRes, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithANNSField(common.DefaultFloatVecFieldName).WithOutputFields([]string{common.DefaultInt64FieldName, common.DefaultJSONFieldName}))
common.CheckErr(t, errSearch, true)
common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, common.DefaultJSONFieldName}, searchRes[0].Fields)
@@ -709,7 +709,7 @@ func TestSearchDynamicFieldExpr(t *testing.T) {
for _, expr := range exprs {
log.Debug("TestSearchDynamicFieldExpr", zap.String("expr", expr))
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
- searchRes, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithANNSField(common.DefaultFloatVecFieldName).WithOutputFields([]string{common.DefaultInt64FieldName, "dynamicNumber", "number"}))
common.CheckErr(t, errSearch, true)
common.CheckOutputFields(t, []string{common.DefaultInt64FieldName, "dynamicNumber", "number"}, searchRes[0].Fields)
@@ -731,7 +731,7 @@ func TestSearchDynamicFieldExpr(t *testing.T) {
for _, expr := range exprs2 {
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
- searchRes, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithANNSField(common.DefaultFloatVecFieldName).
WithOutputFields([]string{common.DefaultInt64FieldName, common.DefaultJSONFieldName, common.DefaultDynamicFieldName, "dynamicNumber", "number"}))
common.CheckErr(t, errSearch, true)
@@ -778,7 +778,7 @@ func TestSearchArrayFieldExpr(t *testing.T) {
}
vectors := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeFloatVector)
for _, expr := range exprs {
- searchRes, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithOutputFields(allArrayFields))
common.CheckErr(t, errSearch, true)
common.CheckOutputFields(t, allArrayFields, searchRes[0].Fields)
@@ -786,7 +786,7 @@ func TestSearchArrayFieldExpr(t *testing.T) {
}
// search hits empty
- searchRes, errSearchEmpty := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ searchRes, errSearchEmpty := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(fmt.Sprintf("array_contains (%s, 1000000)", common.DefaultInt32ArrayField)).WithOutputFields(allArrayFields))
common.CheckErr(t, errSearchEmpty, true)
common.CheckSearchResult(t, searchRes, common.DefaultNq, 0)
@@ -810,7 +810,7 @@ func TestSearchNotExistedExpr(t *testing.T) {
// search with invalid expr
vectors := hp.GenSearchVectors(1, common.DefaultDim, entity.FieldTypeFloatVector)
expr := "id in [0]"
- res, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
+ res, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, vectors).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithANNSField(common.DefaultFloatVecFieldName))
if isDynamic {
common.CheckErr(t, errSearch, true)
@@ -857,7 +857,7 @@ func TestSearchMultiVectors(t *testing.T) {
queryVec := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, fnt.fieldType)
expr := fmt.Sprintf("%s > 10", common.DefaultInt64FieldName)
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit*2, queryVec).WithConsistencyLevel(entity.ClStrong).
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit*2, queryVec).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithANNSField(fnt.fieldName).WithOutputFields([]string{"*"}))
common.CheckErr(t, errSearch, true)
common.CheckSearchResult(t, resSearch, common.DefaultNq, common.DefaultLimit*2)
@@ -867,7 +867,7 @@ func TestSearchMultiVectors(t *testing.T) {
}, resSearch[0].Fields)
// pagination search
- resPage, errPage := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
+ resPage, errPage := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
WithFilter(expr).WithANNSField(fnt.fieldName).WithOutputFields([]string{"*"}).WithOffset(10))
common.CheckErr(t, errPage, true)
@@ -902,7 +902,7 @@ func TestSearchSparseVector(t *testing.T) {
// search
queryVec := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeSparseVector)
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
WithOutputFields([]string{"*"}))
common.CheckErr(t, errSearch, true)
@@ -934,12 +934,12 @@ func TestSearchInvalidSparseVector(t *testing.T) {
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema).TWithFieldIndex(map[string]index.Index{common.DefaultSparseVecFieldName: idx}))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
- _, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, []entity.Vector{}).WithConsistencyLevel(entity.ClStrong))
+ _, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, []entity.Vector{}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, errSearch, false, "nq (number of search vector per search request) should be in range [1, 16384]")
vector1, err := entity.NewSliceSparseEmbedding([]uint32{}, []float32{})
common.CheckErr(t, err, true)
- _, errSearch1 := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, []entity.Vector{vector1}).WithConsistencyLevel(entity.ClStrong))
+ _, errSearch1 := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, []entity.Vector{vector1}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, errSearch1, false, "Sparse row data should not be empty")
positions := make([]uint32, 100)
@@ -949,7 +949,7 @@ func TestSearchInvalidSparseVector(t *testing.T) {
values[i] = rand.Float32()
}
vector, _ := entity.NewSliceSparseEmbedding(positions, values)
- _, errSearch2 := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, []entity.Vector{vector}).WithConsistencyLevel(entity.ClStrong))
+ _, errSearch2 := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, []entity.Vector{vector}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, errSearch2, false, "Invalid sparse row: id should be strict ascending")
}
}
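// a minimal sketch (collection name assumed) of building a sparse query vector
// that passes the server-side validations asserted above: a sparse row must be
// non-empty and its ids strictly ascending.
//
//	positions := []uint32{2, 10, 57} // strictly ascending, no duplicates
//	values := []float32{0.4, 0.1, 0.8}
//	sparseVec, err := entity.NewSliceSparseEmbedding(positions, values)
//	if err == nil {
//		_, _ = mc.Search(ctx, client.NewSearchOption("coll", common.DefaultLimit, []entity.Vector{sparseVec}))
//	}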
@@ -971,12 +971,12 @@ func TestSearchSparseVectorPagination(t *testing.T) {
// search
queryVec := hp.GenSearchVectors(common.DefaultNq, common.DefaultDim, entity.FieldTypeSparseVector)
- resSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
+ resSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
WithOutputFields([]string{"*"}))
common.CheckErr(t, errSearch, true)
require.Len(t, resSearch, common.DefaultNq)
- pageSearch, errSearch := mc.Search(ctx, clientv2.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
+ pageSearch, errSearch := mc.Search(ctx, client.NewSearchOption(schema.CollectionName, common.DefaultLimit, queryVec).WithConsistencyLevel(entity.ClStrong).
WithOutputFields([]string{"*"}).WithOffset(5))
common.CheckErr(t, errSearch, true)
require.Len(t, pageSearch, common.DefaultNq)


@@ -0,0 +1,447 @@
package testcases
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/milvus-io/milvus/client/v2"
"github.com/milvus-io/milvus/client/v2/column"
"github.com/milvus-io/milvus/client/v2/entity"
"github.com/milvus-io/milvus/tests/go_client/common"
hp "github.com/milvus-io/milvus/tests/go_client/testcases/helper"
)
func TestUpsertAllFields(t *testing.T) {
/*
1. prepare: create -> insert -> index -> load -> query
2. upsert existing entities -> data updated -> query and verify
3. delete some pks -> query and verify
4. upsert a mix of deleted (non-existent) and existing pks -> query and verify
5. upsert only non-existent pks -> query and verify
*/
t.Parallel()
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
// connect
mc := createDefaultMilvusClient(ctx, t)
// create -> insert [0, 3000) -> flush -> index -> load
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.AllFields), hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(true))
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, 0), hp.TNewDataOption())
prepare.FlushData(ctx, t, mc, schema.CollectionName)
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
upsertNb := 200
// upsert existing entities [0, 200) -> query and verify
columns, dynamicColumns := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(upsertNb))
upsertRes, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columns...).WithColumns(dynamicColumns...))
common.CheckErr(t, err, true)
require.EqualValues(t, upsertNb, upsertRes.UpsertCount)
expr := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, upsertNb)
resSet, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, append(columns, hp.MergeColumnsToDynamic(upsertNb, dynamicColumns, common.DefaultDynamicFieldName)), resSet.Fields)
// delete all upserted entities -> query and verify
delRes, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr))
common.CheckErr(t, err, true)
require.EqualValues(t, upsertNb, delRes.DeleteCount)
resSet, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
require.Zero(t, resSet.ResultCount)
// upsert a mix of deleted (non-existent) and existing pks [100, 500) -> query and verify the updated entities
newUpsertNb := 400
newUpsertStart := 100
columnsPart, dynamicColumnsPart := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(newUpsertNb).TWithStart(newUpsertStart))
upsertResPart, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columnsPart...).WithColumns(dynamicColumnsPart...))
common.CheckErr(t, err, true)
require.EqualValues(t, newUpsertNb, upsertResPart.UpsertCount)
newExpr := fmt.Sprintf("%d <= %s < %d", newUpsertStart, common.DefaultInt64FieldName, newUpsertNb+newUpsertStart)
resSetPart, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(newExpr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, append(columnsPart, hp.MergeColumnsToDynamic(newUpsertNb, dynamicColumnsPart, common.DefaultDynamicFieldName)), resSetPart.Fields)
// upsert only deleted (non-existent) pks [0, 100)
columnsNot, dynamicColumnsNot := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(newUpsertStart))
upsertResNot, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columnsNot...).WithColumns(dynamicColumnsNot...))
common.CheckErr(t, err, true)
require.EqualValues(t, newUpsertStart, upsertResNot.UpsertCount)
newExprNot := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, newUpsertStart)
resSetNot, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(newExprNot).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, append(columnsNot, hp.MergeColumnsToDynamic(newUpsertStart, dynamicColumnsNot, common.DefaultDynamicFieldName)), resSetNot.Fields)
}
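// a minimal standalone sketch of the flow verified above (collection and column
// names are illustrative): Upsert takes the same ColumnBasedInsertOption as
// Insert, overwrites rows whose pk already exists, and inserts the rest.
//
//	pkCol := column.NewColumnInt64("int64", []int64{0, 1})
//	vecCol := hp.GenColumnData(2, entity.FieldTypeFloatVector, *hp.TNewDataOption())
//	res, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption("coll").WithColumns(pkCol, vecCol))
//	// on success res.UpsertCount == 2, whether or not the pks existed before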
func TestUpsertSparse(t *testing.T) {
t.Skip("https://github.com/milvus-io/milvus-sdk-go/issues/769")
/*
1. prepare: create -> insert -> index -> load -> query
2. upsert existing entities -> data updated -> query and verify
3. delete some pks -> query and verify
4. upsert a mix of deleted (non-existent) and existing pks -> query and verify
5. upsert only non-existent pks -> query and verify
*/
t.Parallel()
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
// connect
mc := createDefaultMilvusClient(ctx, t)
// create -> insert [0, 3000) -> flush -> index -> load
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64VarcharSparseVec), hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(true))
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, 0), hp.TNewDataOption().TWithSparseMaxLen(128))
prepare.FlushData(ctx, t, mc, schema.CollectionName)
upsertNb := 200
// upsert existing entities [0, 200) -> query and verify
columns, dynamicColumns := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(upsertNb))
upsertRes, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columns...).WithColumns(dynamicColumns...))
common.CheckErr(t, err, true)
require.EqualValues(t, upsertNb, upsertRes.UpsertCount)
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
expr := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, upsertNb)
resSet, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, append(columns, hp.MergeColumnsToDynamic(upsertNb, dynamicColumns, common.DefaultDynamicFieldName)), resSet.Fields)
// delete all upserted entities -> query and verify
delRes, err := mc.Delete(ctx, client.NewDeleteOption(schema.CollectionName).WithExpr(expr))
common.CheckErr(t, err, true)
require.EqualValues(t, upsertNb, delRes.DeleteCount)
resSet, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
require.Zero(t, resSet.ResultCount)
// upsert a mix of deleted (non-existent) and existing pks [100, 500) -> query and verify the updated entities
newUpsertNb := 400
newUpsertStart := 100
columnsPart, dynamicColumnsPart := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(newUpsertNb).TWithStart(newUpsertStart))
upsertResPart, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columnsPart...).WithColumns(dynamicColumnsPart...))
common.CheckErr(t, err, true)
require.EqualValues(t, newUpsertNb, upsertResPart.UpsertCount)
newExpr := fmt.Sprintf("%d <= %s < %d", newUpsertStart, common.DefaultInt64FieldName, newUpsertNb+newUpsertStart)
resSetPart, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(newExpr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, append(columnsPart, hp.MergeColumnsToDynamic(newUpsertNb, dynamicColumnsPart, common.DefaultDynamicFieldName)), resSetPart.Fields)
// upsert only deleted (non-existent) pks [0, 100)
columnsNot, dynamicColumnsNot := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(newUpsertStart))
upsertResNot, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columnsNot...).WithColumns(dynamicColumnsNot...))
common.CheckErr(t, err, true)
require.EqualValues(t, newUpsertStart, upsertResNot.UpsertCount)
newExprNot := fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, newUpsertStart)
resSetNot, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(newExprNot).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, append(columnsNot, hp.MergeColumnsToDynamic(newUpsertStart, dynamicColumnsNot, common.DefaultDynamicFieldName)), resSetNot.Fields)
}
func TestUpsertVarcharPk(t *testing.T) {
/*
test upsert with varchar pks
query after each upsert
upserting "a" as " a " is actually a new insert, since the pks differ
*/
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
// create -> insert [0, 3000) -> flush -> index -> load
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.VarcharBinary), hp.TNewFieldsOption(), hp.TNewSchemaOption())
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, common.DefaultNb), hp.TNewDataOption())
prepare.FlushData(ctx, t, mc, schema.CollectionName)
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
upsertNb := 10
// upsert existing entities [0, 10), varchar pks: ["0", ... "9"]
genDataOpt := *hp.TNewDataOption()
varcharColumn, binaryColumn := hp.GenColumnData(upsertNb, entity.FieldTypeVarChar, genDataOpt), hp.GenColumnData(upsertNb, entity.FieldTypeBinaryVector, genDataOpt)
upsertRes, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(varcharColumn, binaryColumn))
common.CheckErr(t, err, true)
common.EqualColumn(t, varcharColumn, upsertRes.IDs)
// query and verify the updated entities
expr := fmt.Sprintf("%s in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] ", common.DefaultVarcharFieldName)
resSet1, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, []column.Column{varcharColumn, binaryColumn}, resSet1.Fields)
// upsert varchar pks wrapped in spaces: [" 0 ", ... " 9 "]
varcharValues := make([]string, 0, upsertNb)
for i := 0; i < upsertNb; i++ {
varcharValues = append(varcharValues, " "+strconv.Itoa(i)+" ")
}
varcharColumn1 := column.NewColumnVarChar(common.DefaultVarcharFieldName, varcharValues)
binaryColumn1 := hp.GenColumnData(upsertNb, entity.FieldTypeBinaryVector, genDataOpt)
upsertRes1, err1 := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(varcharColumn1, binaryColumn1))
common.CheckErr(t, err1, true)
common.EqualColumn(t, varcharColumn1, upsertRes1.IDs)
// query the old varchar pks (no spaces): ["0", ... "9"]
resSet2, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, []column.Column{varcharColumn, binaryColumn}, resSet2.Fields)
// query and verify the updated entities
exprNew := fmt.Sprintf("%s like ' %% ' ", common.DefaultVarcharFieldName)
resSet3, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(exprNew).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, []column.Column{varcharColumn1, binaryColumn1}, resSet3.Fields)
}
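// note on the "like" filter above: % is the wildcard in Milvus "like"
// expressions, and since the expression is built with fmt.Sprintf the literal
// wildcard must be escaped as %%:
//
//	expr := fmt.Sprintf("%s like ' %% ' ", common.DefaultVarcharFieldName)
//	// renders as: varchar like ' % '  -> matches the pks wrapped in spaces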
// test upsert with multiple partitions
func TestUpsertMultiPartitions(t *testing.T) {
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.AllFields), hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(true))
parName := common.GenRandomString("p", 4)
err := mc.CreatePartition(ctx, client.NewCreatePartitionOption(schema.CollectionName, parName))
common.CheckErr(t, err, true)
// insert [0, nb) into the default partition and [nb, nb*2) into the new partition
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, common.DefaultNb), hp.TNewDataOption())
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, common.DefaultNb).TWithPartitionName(parName), hp.TNewDataOption().TWithStart(common.DefaultNb))
prepare.FlushData(ctx, t, mc, schema.CollectionName)
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
// upsert into the new partition
columns, dynamicColumns := hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithStart(common.DefaultNb))
upsertRes, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(columns...).WithColumns(dynamicColumns...).WithPartition(parName))
common.CheckErr(t, err, true)
require.EqualValues(t, common.DefaultNb, upsertRes.UpsertCount)
// query and verify
expr := fmt.Sprintf("%d <= %s < %d", common.DefaultNb, common.DefaultInt64FieldName, common.DefaultNb+200)
resSet3, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(expr).WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
expColumns := []column.Column{hp.MergeColumnsToDynamic(200, dynamicColumns, common.DefaultDynamicFieldName)}
for _, c := range columns {
expColumns = append(expColumns, c.Slice(0, 200))
}
common.CheckQueryResult(t, expColumns, resSet3.Fields)
}
func TestUpsertSamePksManyTimes(t *testing.T) {
// upsert pks [0, 10) many times with different vectors
// query -> gets the entities from the last upsert
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout*2)
mc := createDefaultMilvusClient(ctx, t)
// create and insert
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.AllFields), hp.TNewFieldsOption(), hp.TNewSchemaOption())
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, common.DefaultNb), hp.TNewDataOption())
var _columns []column.Column
upsertNb := 10
for i := 0; i < 10; i++ {
// upsert existing entities [0, 10)
_columns, _ = hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(upsertNb))
_, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(_columns...))
common.CheckErr(t, err, true)
}
// flush -> index -> load
prepare.FlushData(ctx, t, mc, schema.CollectionName)
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
// query and verify the updated entities
resSet, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, upsertNb)).
WithOutputFields([]string{common.DefaultFloatVecFieldName}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
for _, c := range _columns {
if c.Name() == common.DefaultFloatVecFieldName {
common.EqualColumn(t, c, resSet.GetColumn(common.DefaultFloatVecFieldName))
}
}
}
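// a minimal sketch of the last-write-wins semantics verified above: upserting
// the same pks repeatedly keeps only the values from the final upsert.
//
//	var last []column.Column
//	for i := 0; i < 3; i++ {
//		last, _ = hp.GenColumnsBasedSchema(schema, hp.TNewDataOption().TWithNb(10))
//		_, _ = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(last...))
//	}
//	// after flush -> index -> load, querying pks [0, 10) returns the last batch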
// test upsert on an autoID collection
func TestUpsertAutoID(t *testing.T) {
/*
prepare an autoID collection
upsert without pks -> error
upsert with pks -> error: autoID collections do not support upsert
*/
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption().TWithAutoID(true), hp.TNewSchemaOption())
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, 100), hp.TNewDataOption())
// upsert without pks
vecColumn := hp.GenColumnData(100, entity.FieldTypeFloatVector, *hp.TNewDataOption())
_, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn))
common.CheckErr(t, err, false, "upsert can not assign primary field data when auto id enabled")
// upsert with pks
pkColumn := hp.GenColumnData(100, entity.FieldTypeInt64, *hp.TNewDataOption())
_, err1 := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, vecColumn))
common.CheckErr(t, err1, false, "upsert can not assign primary field data when auto id enabled")
}
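// note: as asserted above, a collection with autoID enabled rejects Upsert both
// with and without a pk column; a delete-then-insert sequence is the usual
// workaround (an assumption here, not verified by this diff).
//
//	if _, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(vecColumn)); err != nil {
//		// err: "upsert can not assign primary field data when auto id enabled"
//	}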
// test upsert with invalid collection / partition name
func TestUpsertNotExistCollectionPartition(t *testing.T) {
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
// upsert into a non-existent collection
_, errUpsert := mc.Upsert(ctx, client.NewColumnBasedInsertOption("aaa"))
common.CheckErr(t, errUpsert, false, "can't find collection")
// create a default collection
_, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())
_, errUpsert = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithPartition("aaa"))
common.CheckErr(t, errUpsert, false, "field int64 not passed")
// upsert into a non-existent partition
opt := *hp.TNewDataOption()
pkColumn, vecColumn := hp.GenColumnData(10, entity.FieldTypeInt64, opt), hp.GenColumnData(10, entity.FieldTypeFloatVector, opt)
_, errUpsert = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithPartition("aaa").WithColumns(pkColumn, vecColumn))
common.CheckErr(t, errUpsert, false, "partition not found[partition=aaa]")
}
// test upsert with invalid column data
func TestUpsertInvalidColumnData(t *testing.T) {
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
// create and insert
_, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption())
upsertNb := 10
// 1. upsert missing columns
opt := *hp.TNewDataOption()
pkColumn, vecColumn := hp.GenColumnData(upsertNb, entity.FieldTypeInt64, opt), hp.GenColumnData(upsertNb, entity.FieldTypeFloatVector, opt)
_, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn))
common.CheckErr(t, err, false, fmt.Sprintf("field %s not passed", common.DefaultFloatVecFieldName))
// 2. upsert with an extra duplicated column
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, vecColumn, vecColumn))
common.CheckErr(t, err, false, fmt.Sprintf("duplicated column %s found", common.DefaultFloatVecFieldName))
// 3. upsert vectors with a mismatched dim
dimColumn := hp.GenColumnData(upsertNb, entity.FieldTypeFloatVector, *hp.TNewDataOption().TWithDim(64))
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, dimColumn))
common.CheckErr(t, err, false, fmt.Sprintf("params column %s vector dim 64 not match collection definition, which has dim of %d",
common.DefaultFloatVecFieldName, common.DefaultDim))
// 4. columns with different lengths
diffLenColumn := hp.GenColumnData(upsertNb+1, entity.FieldTypeFloatVector, opt)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, diffLenColumn))
common.CheckErr(t, err, false, "column size not match")
// 5. column type differs from the schema
varColumn := hp.GenColumnData(upsertNb, entity.FieldTypeVarChar, opt)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, varColumn, vecColumn))
common.CheckErr(t, err, false, "field varchar does not exist in collection")
// 6. both columns empty
pkColumnEmpty, vecColumnEmpty := hp.GenColumnData(0, entity.FieldTypeInt64, opt), hp.GenColumnData(0, entity.FieldTypeFloatVector, opt)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumnEmpty, vecColumnEmpty))
common.CheckErr(t, err, false, "num_rows should be greater than 0")
// 7. empty pk column, non-empty vector column
pkColumnEmpty, vecColumnEmpty = hp.GenColumnData(0, entity.FieldTypeInt64, opt), hp.GenColumnData(10, entity.FieldTypeFloatVector, opt)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumnEmpty, vecColumnEmpty))
common.CheckErr(t, err, false, "invalid parameter[expected=need long int array][actual=got nil]")
// 8. non-empty pk column, empty vector column
pkColumnEmpty, vecColumnEmpty = hp.GenColumnData(10, entity.FieldTypeInt64, opt), hp.GenColumnData(0, entity.FieldTypeFloatVector, opt)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumnEmpty, vecColumnEmpty))
common.CheckErr(t, err, false, "column size not match")
}
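// a hedged helper sketch (not part of the test suite) for catching the size
// mismatch of case 4 client-side before calling Upsert; column.Column exposes
// Name() and Len():
//
//	func columnsAligned(cols ...column.Column) error {
//		if len(cols) == 0 {
//			return fmt.Errorf("no columns passed")
//		}
//		n := cols[0].Len()
//		for _, c := range cols[1:] {
//			if c.Len() != n {
//				return fmt.Errorf("column %s has %d rows, want %d", c.Name(), c.Len(), n)
//			}
//		}
//		return nil
//	}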
func TestUpsertDynamicField(t *testing.T) {
// enable dynamic field and insert dynamic column
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
// create -> insert [0, 3000) -> flush -> index -> load
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64Vec), hp.TNewFieldsOption(), hp.TNewSchemaOption().TWithEnableDynamicField(true))
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, common.DefaultNb), hp.TNewDataOption())
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
// verify that dynamic field exists
upsertNb := 10
resSet, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s < %d", common.DefaultDynamicNumberField, upsertNb)).
WithOutputFields([]string{common.DefaultDynamicFieldName}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
require.Equal(t, upsertNb, resSet.GetColumn(common.DefaultDynamicFieldName).Len())
// 1. upsert existing pks without dynamic columns
opt := *hp.TNewDataOption()
pkColumn, vecColumn := hp.GenColumnData(upsertNb, entity.FieldTypeInt64, opt), hp.GenColumnData(upsertNb, entity.FieldTypeFloatVector, opt)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, vecColumn))
common.CheckErr(t, err, true)
// query by the dynamic field and get an empty result: the upsert dropped the dynamic data
resSet, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s < %d", common.DefaultDynamicNumberField, upsertNb)).
WithOutputFields([]string{common.DefaultDynamicFieldName}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
require.Equal(t, 0, resSet.GetColumn(common.DefaultDynamicFieldName).Len())
// 2. upsert non-existent pks with dynamic columns -> succeeds
opt.TWithStart(common.DefaultNb)
pkColumn2, vecColumn2 := hp.GenColumnData(upsertNb, entity.FieldTypeInt64, opt), hp.GenColumnData(upsertNb, entity.FieldTypeFloatVector, opt)
dynamicColumns := hp.GenDynamicColumnData(common.DefaultNb, upsertNb)
_, err = mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn2, vecColumn2).WithColumns(dynamicColumns...))
common.CheckErr(t, err, true)
// query and get the dynamic field
resSet, err = mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s >= %d", common.DefaultDynamicNumberField, common.DefaultNb)).
WithOutputFields([]string{common.DefaultDynamicFieldName}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.EqualColumn(t, hp.MergeColumnsToDynamic(upsertNb, dynamicColumns, common.DefaultDynamicFieldName), resSet.GetColumn(common.DefaultDynamicFieldName))
}
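// note on the two cases above: an upsert replaces the whole row, so dynamic
// values that are not re-supplied are dropped (case 1); to preserve them, pass
// the dynamic columns again with every upsert (case 2).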
func TestUpsertWithoutLoading(t *testing.T) {
ctx := hp.CreateContext(t, time.Second*common.DefaultTimeout)
mc := createDefaultMilvusClient(ctx, t)
// create and insert
prepare, schema := hp.CollPrepare.CreateCollection(ctx, t, mc, hp.NewCreateCollectionParams(hp.Int64VecJSON), hp.TNewFieldsOption(), hp.TNewSchemaOption())
prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, common.DefaultNb), hp.TNewDataOption())
// upsert
upsertNb := 10
opt := *hp.TNewDataOption()
pkColumn, jsonColumn, vecColumn := hp.GenColumnData(upsertNb, entity.FieldTypeInt64, opt), hp.GenColumnData(upsertNb, entity.FieldTypeJSON, opt), hp.GenColumnData(upsertNb, entity.FieldTypeFloatVector, opt)
_, err := mc.Upsert(ctx, client.NewColumnBasedInsertOption(schema.CollectionName).WithColumns(pkColumn, jsonColumn, vecColumn))
common.CheckErr(t, err, true)
// index -> load
prepare.CreateIndex(ctx, t, mc, hp.NewIndexParams(schema))
prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
// query and verify
resSet, err := mc.Query(ctx, client.NewQueryOption(schema.CollectionName).WithFilter(fmt.Sprintf("%s < %d", common.DefaultInt64FieldName, upsertNb)).
WithOutputFields([]string{"*"}).WithConsistencyLevel(entity.ClStrong))
common.CheckErr(t, err, true)
common.CheckQueryResult(t, []column.Column{pkColumn, jsonColumn, vecColumn}, resSet.Fields)
}
func TestUpsertPartitionKeyCollection(t *testing.T) {
t.Skip("waiting gen partition key field")
}