Mirror of https://gitee.com/milvus-io/milvus.git (synced 2026-01-03 01:12:25 +08:00)

Commit ce10d1c86e: merge 0.6.0
@@ -21,6 +21,7 @@ Please mark all change in change log and use the ticket from JIRA.
- #440 - Server cannot startup with gpu_resource_config.enable=false in GPU version
- #458 - Index data is not compatible between 0.5 and 0.6
- #465 - Server hang caused by searching with nsg index
- #485 - Increase code coverage rate
- #486 - gpu no usage during index building
- #497 - CPU-version search performance decreased
- #504 - The code coverage rate of core/src/scheduler/optimizer is too low
@@ -33,7 +34,10 @@ Please mark all change in change log and use the ticket from JIRA.
- #533 - NSG build failed with MetricType Inner Product
- #543 - client raise exception in shards when search results is empty
- #545 - Avoid dead circle of build index thread when error occurs
- #552 - Server down during building index_type: IVF_PQ using GPU-edition
- #561 - Milvus server should report exception/error message or terminate on mysql metadata backend error
- #599 - Build index log is incorrect
- #602 - Optimizer specify wrong gpu_id
- #606 - No log generated during building index with CPU

## Feature
@@ -46,11 +50,13 @@ Please mark all change in change log and use the ticket from JIRA.
- #420 - Update shards merge part to match v0.5.3
- #488 - Add log in scheduler/optimizer
- #502 - C++ SDK support IVFPQ and SPTAG
- #560 - Add version in server config file

## Improvement
- #255 - Add ivfsq8 test report detailed version
- #260 - C++ SDK README
- #266 - Rpc request source code refactor
- #274 - Logger the time cost during preloading data
- #275 - Rename C++ SDK IndexType
- #284 - Change C++ SDK to shared library
- #306 - Use int64 for all config integer
ci/jenkins/Jenkinsfile (vendored, 9 changed lines)

@@ -17,7 +17,7 @@ pipeline {
}

parameters{
choice choices: ['Release', 'Debug'], description: '', name: 'BUILD_TYPE'
choice choices: ['Release', 'Debug'], description: 'Build Type', name: 'BUILD_TYPE'
string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
string defaultValue: 'ba070c98-c8cc-4f7c-b657-897715f359fc', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
string defaultValue: 'http://192.168.1.202/artifactory/milvus', description: 'JFROG ARTFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true
@@ -27,9 +27,8 @@ pipeline {
environment {
PROJECT_NAME = "milvus"
LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
SEMVER = "${BRANCH_NAME}"
JOBNAMES = env.JOB_NAME.split('/')
PIPELINE_NAME = "${JOBNAMES[0]}"
SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
PIPELINE_NAME = "${env.JOB_NAME.contains('/') ? env.JOB_NAME.getAt(0..(env.JOB_NAME.indexOf('/') - 1)) : env.JOB_NAME}"
}

stages {
@@ -102,7 +101,7 @@ pipeline {
stages {
stage('Publish') {
steps {
container('publish-images'){
container('publish-images') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
ci/jenkins/internalJenkinsfile.groovy (new file, 477 lines)

@@ -0,0 +1,477 @@
#!/usr/bin/env groovy

pipeline {
agent none

options {
timestamps()
}

parameters{
choice choices: ['Release', 'Debug'], description: 'Build Type', name: 'BUILD_TYPE'
string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
string defaultValue: 'a54e38ef-c424-4ea9-9224-b25fc20e3924', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
string defaultValue: 'http://192.168.1.201/artifactory/milvus', description: 'JFROG ARTFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true
string defaultValue: '76fd48ab-2b8e-4eed-834d-2eefd23bb3a6', description: 'JFROG CREDENTIALS ID', name: 'JFROG_CREDENTIALS_ID', trim: true
}

environment {
PROJECT_NAME = "milvus"
LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
PIPELINE_NAME = "${env.JOB_NAME.contains('/') ? env.JOB_NAME.getAt(0..(env.JOB_NAME.indexOf('/') - 1)) : env.JOB_NAME}"
}

stages {
stage("Ubuntu 18.04 x86_64") {
environment {
OS_NAME = "ubuntu18.04"
CPU_ARCH = "amd64"
}

parallel {
stage ("GPU Version") {
environment {
BINRARY_VERSION = "gpu"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-gpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-gpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
}

stages {
stage("Run Build") {
agent {
kubernetes {
label "${env.BINRARY_VERSION}-build"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
name: milvus-gpu-build-env
labels:
app: milvus
componet: gpu-build-env
spec:
containers:
- name: milvus-gpu-build-env
image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.6.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: BUILD_ENV_IMAGE_ID
value: "da9023b0f858f072672f86483a869aa87e90a5140864f89e5a012ec766d96dea"
command:
- cat
tty: true
resources:
limits:
memory: "24Gi"
cpu: "8.0"
nvidia.com/gpu: 1
requests:
memory: "16Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql
"""
}
}

stages {
stage('Build') {
steps {
container("milvus-${env.BINRARY_VERSION}-build-env") {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
}
}
}
stage('Code Coverage') {
steps {
container("milvus-${env.BINRARY_VERSION}-build-env") {
script {
load "${env.WORKSPACE}/ci/jenkins/step/internalCoverage.groovy"
}
}
}
}
stage('Upload Package') {
steps {
container("milvus-${env.BINRARY_VERSION}-build-env") {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
}
}
}
}
}
}

stage("Publish docker images") {
agent {
kubernetes {
label "${env.BINRARY_VERSION}-publish"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
componet: docker
spec:
containers:
- name: publish-images
image: registry.zilliz.com/library/docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
"""
}
}

stages {
stage('Publish') {
steps {
container('publish-images') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}

stage("Deploy to Development") {
environment {
FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-")
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
}

agent {
kubernetes {
label "${env.BINRARY_VERSION}-dev-test"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test-env
spec:
containers:
- name: milvus-test-env
image: registry.zilliz.com/milvus/milvus-test-env:v0.1
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}

stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}

stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
}
}
}
}
}

stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
}
}

stage ("CPU Version") {
environment {
BINRARY_VERSION = "cpu"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-cpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-cpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
}

stages {
stage("Run Build") {
agent {
kubernetes {
label "${env.BINRARY_VERSION}-build"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
name: milvus-cpu-build-env
labels:
app: milvus
componet: cpu-build-env
spec:
containers:
- name: milvus-cpu-build-env
image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.6.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: BUILD_ENV_IMAGE_ID
value: "23476391bec80c64f10d44a6370c73c71f011a6b95114b10ff82a60e771e11c7"
command:
- cat
tty: true
resources:
limits:
memory: "24Gi"
cpu: "8.0"
requests:
memory: "16Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql
"""
}
}

stages {
stage('Build') {
steps {
container("milvus-${env.BINRARY_VERSION}-build-env") {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
}
}
}
stage('Code Coverage') {
steps {
container("milvus-${env.BINRARY_VERSION}-build-env") {
script {
load "${env.WORKSPACE}/ci/jenkins/step/internalCoverage.groovy"
}
}
}
}
stage('Upload Package') {
steps {
container("milvus-${env.BINRARY_VERSION}-build-env") {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
}
}
}
}
}
}

stage("Publish docker images") {
agent {
kubernetes {
label "${env.BINRARY_VERSION}-publish"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
componet: docker
spec:
containers:
- name: publish-images
image: registry.zilliz.com/library/docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
"""
}
}

stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}

stage("Deploy to Development") {
environment {
FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-")
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
}

agent {
kubernetes {
label "${env.BINRARY_VERSION}-dev-test"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
componet: test-env
spec:
containers:
- name: milvus-test-env
image: registry.zilliz.com/milvus/milvus-test-env:v0.1
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}

stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}

stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
}
}
}
}
}

stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
}
}
}
}
}

boolean isTimeTriggeredBuild() {
if (currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0) {
return true
}
return false
}
@@ -3,9 +3,9 @@ timeout(time: 60, unit: 'MINUTES') {
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
def checkResult = sh(script: "./check_ccache.sh -l ${params.JFROG_ARTFACTORY_URL}/ccache", returnStatus: true)
if ("${env.BINRARY_VERSION}" == "gpu") {
sh ". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -g -u -c"
sh ". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -g -x -u -c"
} else {
sh ". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -m -u -c"
sh ". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -u -c"
}
sh "./update_ccache.sh -l ${params.JFROG_ARTFACTORY_URL}/ccache -u ${USERNAME} -p ${PASSWORD}"
}
ci/jenkins/step/internalCoverage.groovy (new file, 6 lines)

@@ -0,0 +1,6 @@
timeout(time: 30, unit: 'MINUTES') {
dir ("ci/scripts") {
sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"
}
}
@@ -46,7 +46,7 @@ check_ccache() {
echo "fetching ${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz"
wget -q --method HEAD "${ARTIFACTORY_URL}/${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz"
if [[ $? == 0 ]];then
wget "${ARTIFACTORY_URL}/${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz" && \
wget -q "${ARTIFACTORY_URL}/${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz" && \
mkdir -p ${CCACHE_DIRECTORY} && \
tar zxf ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz -C ${CCACHE_DIRECTORY} && \
rm ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz
@@ -1,5 +1,7 @@
# Default values are used when you make no changes to the following parameters.

version: 0.1 # config version

server_config:
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # milvus server port, must in range [1025, 65534]
@@ -1,5 +1,7 @@
# Default values are used when you make no changes to the following parameters.

version: 0.1 # config version

server_config:
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # milvus server port, must in range [1025, 65534]
@@ -182,7 +182,7 @@ DBImpl::PreloadTable(const std::string& table_id) {
return SHUTDOWN_ERROR;
}

// get all table files from parent table
// step 1: get all table files from parent table
meta::DatesT dates;
std::vector<size_t> ids;
meta::TableFilesSchema files_array;
@@ -191,7 +191,7 @@ DBImpl::PreloadTable(const std::string& table_id) {
return status;
}

// get files from partition tables
// step 2: get files from partition tables
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
@@ -203,6 +203,10 @@ DBImpl::PreloadTable(const std::string& table_id) {
int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
int64_t available_size = cache_total - cache_usage;

// step 3: load file one by one
ENGINE_LOG_DEBUG << "Begin pre-load table:" + table_id + ", totally " << files_array.size()
<< " files need to be pre-loaded";
TimeRecorderAuto rc("Pre-load table:" + table_id);
for (auto& file : files_array) {
ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
(MetricType)file.metric_type_, file.nlist_);
@@ -213,10 +217,12 @@ DBImpl::PreloadTable(const std::string& table_id) {

size += engine->PhysicalSize();
if (size > available_size) {
ENGINE_LOG_DEBUG << "Pre-load canceled since cache almost full";
return Status(SERVER_CACHE_FULL, "Cache is full");
} else {
try {
// step 1: load index
std::string msg = "Pre-loaded file: " + file.file_id_ + " size: " + std::to_string(file.file_size_);
TimeRecorderAuto rc_1(msg);
engine->Load(true);
} catch (std::exception& ex) {
std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
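The pre-load loop above is budgeted against the CPU cache: each file's physical size is added to a running total and loading stops with SERVER_CACHE_FULL once the total would exceed the free cache. A minimal stand-alone C++ sketch of that budget rule, with hypothetical sizes in place of the real cache manager and table files:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Assumed numbers; in DBImpl they come from CpuCacheMgr and the table file metadata.
    const int64_t cache_total = 4LL * 1024 * 1024 * 1024;   // total CPU cache capacity
    const int64_t cache_usage = 1LL * 1024 * 1024 * 1024;   // cache already occupied
    const int64_t available_size = cache_total - cache_usage;

    const std::vector<int64_t> file_sizes = {512LL << 20, 1024LL << 20, 2048LL << 20};

    int64_t size = 0;
    for (int64_t file_size : file_sizes) {
        size += file_size;
        if (size > available_size) {
            // Mirrors: return Status(SERVER_CACHE_FULL, "Cache is full");
            std::cout << "Pre-load canceled since cache almost full" << std::endl;
            return 1;
        }
        std::cout << "pre-loaded file of " << file_size << " bytes" << std::endl;
    }
    return 0;
}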
@@ -112,10 +112,12 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
index = GetVecIndexFactory(IndexType::NSG_MIX);
break;
}
#ifdef CUSTOMIZATION
case EngineType::FAISS_IVFSQ8H: {
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_HYBRID);
break;
}
#endif
case EngineType::FAISS_PQ: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_CPU);
@@ -1615,29 +1615,29 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
}
}

std::string msg = "Get table files by type. ";
std::string msg = "Get table files by type.";
for (int file_type : file_types) {
switch (file_type) {
case (int)TableFileSchema::RAW:
msg = msg + "raw files:" + std::to_string(raw_count);
msg = msg + " raw files:" + std::to_string(raw_count);
break;
case (int)TableFileSchema::NEW:
msg = msg + "new files:" + std::to_string(raw_count);
msg = msg + " new files:" + std::to_string(new_count);
break;
case (int)TableFileSchema::NEW_MERGE:
msg = msg + "new_merge files:" + std::to_string(raw_count);
msg = msg + " new_merge files:" + std::to_string(new_merge_count);
break;
case (int)TableFileSchema::NEW_INDEX:
msg = msg + "new_index files:" + std::to_string(raw_count);
msg = msg + " new_index files:" + std::to_string(new_index_count);
break;
case (int)TableFileSchema::TO_INDEX:
msg = msg + "to_index files:" + std::to_string(raw_count);
msg = msg + " to_index files:" + std::to_string(to_index_count);
break;
case (int)TableFileSchema::INDEX:
msg = msg + "index files:" + std::to_string(raw_count);
msg = msg + " index files:" + std::to_string(index_count);
break;
case (int)TableFileSchema::BACKUP:
msg = msg + "backup files:" + std::to_string(raw_count);
msg = msg + " backup files:" + std::to_string(backup_count);
break;
default:
break;
@@ -1157,29 +1157,29 @@ SqliteMetaImpl::FilesByType(const std::string& table_id,
table_files.emplace_back(file_schema);
}

std::string msg = "Get table files by type. ";
std::string msg = "Get table files by type.";
for (int file_type : file_types) {
switch (file_type) {
case (int)TableFileSchema::RAW:
msg = msg + "raw files:" + std::to_string(raw_count);
msg = msg + " raw files:" + std::to_string(raw_count);
break;
case (int)TableFileSchema::NEW:
msg = msg + "new files:" + std::to_string(raw_count);
msg = msg + " new files:" + std::to_string(new_count);
break;
case (int)TableFileSchema::NEW_MERGE:
msg = msg + "new_merge files:" + std::to_string(raw_count);
msg = msg + " new_merge files:" + std::to_string(new_merge_count);
break;
case (int)TableFileSchema::NEW_INDEX:
msg = msg + "new_index files:" + std::to_string(raw_count);
msg = msg + " new_index files:" + std::to_string(new_index_count);
break;
case (int)TableFileSchema::TO_INDEX:
msg = msg + "to_index files:" + std::to_string(raw_count);
msg = msg + " to_index files:" + std::to_string(to_index_count);
break;
case (int)TableFileSchema::INDEX:
msg = msg + "index files:" + std::to_string(raw_count);
msg = msg + " index files:" + std::to_string(index_count);
break;
case (int)TableFileSchema::BACKUP:
msg = msg + "backup files:" + std::to_string(raw_count);
msg = msg + " backup files:" + std::to_string(backup_count);
break;
default:break;
}
@@ -42,7 +42,8 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test {
SetUp() override {
// Init_with_default();
#ifdef MILVUS_GPU_VERSION
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
int64_t MB = 1024 * 1024;
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, MB * 200, MB * 600, 1);
#endif
Generate(256, 1000000 / 100, 1);
index_ = std::make_shared<knowhere::NSG>();
@@ -60,7 +60,7 @@ FaissFlatPass::Run(const TaskPtr& task) {
auto best_device_id = count_ % gpus.size();
SERVER_LOG_DEBUG << "FaissFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id << " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
}
auto label = std::make_shared<SpecResLabel>(res_ptr);
task->label() = label;

@@ -63,7 +63,7 @@ FaissIVFFlatPass::Run(const TaskPtr& task) {
SERVER_LOG_DEBUG << "FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id
<< " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
}
auto label = std::make_shared<SpecResLabel>(res_ptr);
task->label() = label;

@@ -61,7 +61,7 @@ FaissIVFSQ8HPass::Run(const TaskPtr& task) {
SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu" << best_device_id
<< " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
}
auto label = std::make_shared<SpecResLabel>(res_ptr);
task->label() = label;

@@ -63,7 +63,7 @@ FaissIVFSQ8Pass::Run(const TaskPtr& task) {
SERVER_LOG_DEBUG << "FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu" << best_device_id
<< " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
}
auto label = std::make_shared<SpecResLabel>(res_ptr);
task->label() = label;
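All four passes pick up the same one-line fix for issue #602: count_ % gpus.size() is an index into the list of GPUs enabled in gpu_resource_config, not a device id, so the resource lookup must go through gpus[best_device_id]. A stand-alone C++ sketch of that round-robin selection, with a hypothetical GPU list in place of the scheduler's configuration:

#include <cstdint>
#include <iostream>
#include <vector>

// Returns the device id for the next search task; `count` is the pass's running counter.
int64_t NextSearchGpu(const std::vector<int64_t>& gpus, uint64_t& count) {
    const int64_t best_device_id = count % gpus.size();  // an index into `gpus`, e.g. 0 or 1
    ++count;
    return gpus[best_device_id];                         // the real device id, e.g. 2 or 5
}

int main() {
    const std::vector<int64_t> gpus = {2, 5};  // hypothetical ids from gpu_resource_config
    uint64_t count = 0;
    for (int i = 0; i < 4; ++i) {
        std::cout << "dispatch search to gpu " << NextSearchGpu(gpus, count) << std::endl;
    }
    return 0;
}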
@@ -85,18 +85,18 @@ struct RowRecord {
* @brief TopK query result
*/
struct QueryResult {
std::vector<int64_t> ids;
std::vector<float> distances;
std::vector<int64_t> ids; ///< Query ids result
std::vector<float> distances; ///< Query distances result
};
using TopKQueryResult = std::vector<QueryResult>;
using TopKQueryResult = std::vector<QueryResult>; ///< Topk query result

/**
* @brief index parameters
*/
struct IndexParam {
std::string table_name;
IndexType index_type;
int32_t nlist;
std::string table_name; ///< Table name for create index
IndexType index_type; ///< Create index type
int32_t nlist; ///< Index nlist
};

/**
@@ -142,8 +142,8 @@ class Connection {
/**
* @brief Connect
*
* Connect function should be called before any operations
* Server will be connected after Connect return OK
* This method is used to connect server.
* Connect function should be called before any operations.
*
* @param param, use to provide server information
*
@@ -156,10 +156,10 @@ class Connection {
/**
* @brief Connect
*
* Connect function should be called before any operations
* Server will be connected after Connect return OK
* This method is used to connect server.
* Connect function should be called before any operations.
*
* @param uri, use to provide server information, example: milvus://ipaddress:port
* @param uri, use to provide server uri, example: milvus://ipaddress:port
*
* @return Indicate if connect is successful
*/
@@ -169,7 +169,7 @@ class Connection {
/**
* @brief connected
*
* Connection status.
* This method is used to test whether server is connected.
*
* @return Indicate if connection status
*/
@@ -179,7 +179,7 @@ class Connection {
/**
* @brief Disconnect
*
* Server will be disconnected after Disconnect return OK
* This method is used to disconnect server.
*
* @return Indicate if disconnect is successful
*/
@@ -189,7 +189,7 @@ class Connection {
/**
* @brief Create table method
*
* This method is used to create table
* This method is used to create table.
*
* @param param, use to provide table information to be created.
*
@@ -201,7 +201,7 @@ class Connection {
/**
* @brief Test table existence method
*
* This method is used to create table
* This method is used to create table.
*
* @param table_name, target table's name.
*
@@ -211,13 +211,13 @@ class Connection {
HasTable(const std::string& table_name) = 0;

/**
* @brief Delete table method
* @brief Drop table method
*
* This method is used to delete table(and its partitions).
* This method is used to drop table(and its partitions).
*
* @param table_name, target table's name.
*
* @return Indicate if table is delete successfully.
* @return Indicate if table is drop successfully.
*/
virtual Status
DropTable(const std::string& table_name) = 0;
@@ -239,14 +239,17 @@ class Connection {
CreateIndex(const IndexParam& index_param) = 0;

/**
* @brief Add vector to table
* @brief Insert vector to table
*
* This method is used to add vector array to table.
* This method is used to insert vector array to table.
*
* @param table_name, target table's name.
* @param partition_tag, target partition's tag, keep empty if no partition.
* @param record_array, vector array is inserted.
* @param id_array, after inserted every vector is given a id.
* @param id_array,
* specify id for each vector,
* if this array is empty, milvus will generate unique id for each vector,
* and return all ids by this parameter.
*
* @return Indicate if vector array are inserted successfully
*/
@@ -259,11 +262,12 @@ class Connection {
*
* This method is used to query vector in table.
*
* @param table_name, target table's name, keep empty if no partition.
* @param partition_tags, target partitions.
* @param table_name, target table's name.
* @param partition_tags, target partitions, keep empty if no partition.
* @param query_record_array, all vector are going to be queried.
* @param query_range_array, time ranges, if not specified, will search in whole table
* @param query_range_array, [deprecated] time ranges, if not specified, will search in whole table
* @param topk, how many similarity vectors will be searched.
* @param nprobe, the number of centroids choose to search.
* @param topk_query_result_array, result array.
*
* @return Indicate if query is successful.
@@ -304,7 +308,7 @@ class Connection {
*
* This method is used to list all tables.
*
* @param table_array, all tables are push into the array.
* @param table_array, all tables in database.
*
* @return Indicate if this operation is successful.
*/
@@ -346,12 +350,13 @@ class Connection {
*
* This method is internal used.
*
* @return Server status.
* @return Task information in tasktables.
*/
virtual std::string
DumpTaskTables() const = 0;

/**
* [deprecated]
* @brief delete tables by date range
*
* This method is used to delete table data by date range.
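Putting the revised doc comments together, a typical client call sequence against this header is: Connect before anything else, Insert with an empty id_array so the server generates and returns ids, and Search with empty partition_tags to cover the whole table. The sketch below only follows the parameter lists documented above; Connection::Create()/Destroy(), the ConnectParam field order, the Range type of the deprecated query_range_array, and RowRecord's `data` member are assumptions not shown in this diff:

#include <memory>
#include <string>
#include <vector>

#include "MilvusApi.h"  // the C++ SDK header edited above

int main() {
    std::shared_ptr<milvus::Connection> conn = milvus::Connection::Create();  // assumed factory

    milvus::ConnectParam param = {"127.0.0.1", "19530"};  // field names/order assumed
    conn->Connect(param);  // must precede any other call (see @brief Connect)

    // Insert: an empty id_array asks the server to generate unique ids and return them here.
    std::vector<milvus::RowRecord> records(10);
    for (auto& record : records) {
        record.data.resize(128, 0.1f);  // 128-dim vectors; member name `data` assumed
    }
    std::vector<int64_t> id_array;
    conn->Insert("example_table", "", records, id_array);

    // Search: empty partition_tags searches the whole table; query_range_array is deprecated.
    std::vector<std::string> partition_tags;
    std::vector<milvus::Range> query_ranges;  // type name assumed
    milvus::TopKQueryResult results;
    conn->Search("example_table", partition_tags, records, query_ranges,
                 /*topk=*/5, /*nprobe=*/16, results);

    milvus::Connection::Destroy(conn);  // assumed cleanup counterpart to Create()
    return 0;
}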
@@ -20,6 +20,7 @@
#include <iostream>
#include <regex>
#include <string>
#include <unordered_map>
#include <vector>

#include "config/YamlConfigMgr.h"
@@ -33,6 +34,8 @@ namespace server {

constexpr uint64_t GB = 1UL << 30;

static const std::unordered_map<std::string, std::string> milvus_config_version_map({{"0.6.0", "0.1"}});

Config&
Config::GetInstance() {
static Config config_inst;
@@ -69,6 +72,12 @@ Status
Config::ValidateConfig() {
Status s;

std::string config_version;
s = GetConfigVersion(config_version);
if (!s.ok()) {
return s;
}

/* server config */
std::string server_addr;
s = GetServerConfigAddress(server_addr);
@@ -383,6 +392,16 @@ Config::PrintAll() {
}

////////////////////////////////////////////////////////////////////////////////
Status
Config::CheckConfigVersion(const std::string& value) {
if (milvus_config_version_map.at(MILVUS_VERSION) != value) {
std::string msg = "Invalid config version: " + value +
". Expected config version: " + milvus_config_version_map.at(MILVUS_VERSION);
return Status(SERVER_INVALID_ARGUMENT, msg);
}
return Status::OK();
}

Status
Config::CheckServerConfigAddress(const std::string& value) {
if (!ValidationUtil::ValidateIpAddress(value).ok()) {
@@ -766,10 +785,14 @@ Config::CheckGpuResourceConfigBuildIndexResources(const std::vector<std::string>

////////////////////////////////////////////////////////////////////////////////
ConfigNode&
Config::GetConfigNode(const std::string& name) {
Config::GetConfigRoot() {
ConfigMgr* mgr = YamlConfigMgr::GetInstance();
ConfigNode& root_node = mgr->GetRootNode();
return root_node.GetChild(name);
return mgr->GetRootNode();
}

ConfigNode&
Config::GetConfigNode(const std::string& name) {
return GetConfigRoot().GetChild(name);
}

Status
@@ -816,6 +839,12 @@ Config::GetConfigSequenceStr(const std::string& parent_key, const std::string& c
return value;
}

Status
Config::GetConfigVersion(std::string& value) {
value = GetConfigRoot().GetValue(CONFIG_VERSION);
return CheckConfigVersion(value);
}

Status
Config::GetServerConfigAddress(std::string& value) {
value = GetConfigStr(CONFIG_SERVER, CONFIG_SERVER_ADDRESS, CONFIG_SERVER_ADDRESS_DEFAULT);
@@ -28,6 +28,8 @@
namespace milvus {
namespace server {

static const char* CONFIG_VERSION = "version";

/* server config */
static const char* CONFIG_SERVER = "server_config";
static const char* CONFIG_SERVER_ADDRESS = "address";
@@ -115,6 +117,8 @@ class Config {
PrintAll();

private:
ConfigNode&
GetConfigRoot();
ConfigNode&
GetConfigNode(const std::string& name);
Status
@@ -125,6 +129,9 @@ class Config {
PrintConfigSection(const std::string& config_node_name);

///////////////////////////////////////////////////////////////////////////
Status
CheckConfigVersion(const std::string& value);

/* server config */
Status
CheckServerConfigAddress(const std::string& value);
@@ -193,6 +200,8 @@ class Config {
std::string
GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim = ",",
const std::string& default_value = "");
Status
GetConfigVersion(std::string& value);

public:
/* server config */
@@ -96,4 +96,11 @@ TimeRecorder::ElapseFromBegin(const std::string& msg) {
return span;
}

TimeRecorderAuto::TimeRecorderAuto(const std::string& header, int64_t log_level) : TimeRecorder(header, log_level) {
}

TimeRecorderAuto::~TimeRecorderAuto() {
ElapseFromBegin("totally cost");
}

} // namespace milvus
@@ -28,7 +28,7 @@ class TimeRecorder {
public:
explicit TimeRecorder(const std::string& header, int64_t log_level = 1);

~TimeRecorder(); // trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5
virtual ~TimeRecorder(); // trace = 0, debug = 1, info = 2, warn = 3, error = 4, critical = 5

double
RecordSection(const std::string& msg);
@@ -50,4 +50,11 @@ class TimeRecorder {
int64_t log_level_;
};

class TimeRecorderAuto : public TimeRecorder {
public:
explicit TimeRecorderAuto(const std::string& header, int64_t log_level = 1);

~TimeRecorderAuto();
};

} // namespace milvus
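TimeRecorderAuto is the RAII variant: its destructor calls ElapseFromBegin("totally cost"), so a scoped instance logs the total elapsed time of the enclosing block, which is how DBImpl::PreloadTable uses it above. A small usage sketch, with the include path assumed from the Milvus source layout:

#include <chrono>
#include <thread>

#include "utils/TimeRecorder.h"  // path assumed

void PreloadExample() {
    // Logs the "totally cost" line when rc leaves scope, mirroring
    // `TimeRecorderAuto rc("Pre-load table:" + table_id);` in DBImpl::PreloadTable.
    milvus::TimeRecorderAuto rc("Pre-load example");

    std::this_thread::sleep_for(std::chrono::milliseconds(50));  // simulated work
}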
@@ -134,7 +134,7 @@ IVFPQConfAdapter::Match(const TempMetaConf& metaconf) {

/*
* Faiss 1.6
* Only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims per sub-quantizer are currently supporte with
* Only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims per sub-quantizer are currently supported with
* no precomputed codes. Precomputed codes supports any number of dimensions, but will involve memory overheads.
*/
static std::vector<int64_t> support_dim_per_subquantizer{32, 28, 24, 20, 16, 12, 10, 8, 6, 4, 3, 2, 1};
@@ -152,7 +152,12 @@ IVFPQConfAdapter::Match(const TempMetaConf& metaconf) {

if (resset.empty()) {
// todo(linxj): throw exception here.
return nullptr;
WRAPPER_LOG_ERROR << "The dims of PQ is wrong : only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims per sub-"
"quantizer are currently supported with no precomputed codes.";
throw WrapperException(
"The dims of PQ is wrong : only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims "
"per sub-quantizer are currently supported with no precomputed codes.");
// return nullptr;
}
static int64_t compression_level = 1; // 1:low, 2:high
if (compression_level == 1) {
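The new error path fires when the adapter cannot split the vector dimension into sub-quantizers of a supported size. The Faiss 1.6 rule quoted in the fixed comment can be checked in isolation; a minimal C++ sketch, where PqSplitSupported is a hypothetical helper and m stands for the number of sub-quantizers:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// True when dim splits into m sub-vectors whose size (dim / m) is one of the
// dims-per-sub-quantizer values supported with no precomputed codes.
bool PqSplitSupported(int64_t dim, int64_t m) {
    static const std::vector<int64_t> support_dim_per_subquantizer{32, 28, 24, 20, 16, 12,
                                                                   10, 8, 6, 4, 3, 2, 1};
    if (m <= 0 || dim % m != 0) {
        return false;
    }
    const int64_t dim_per_subquantizer = dim / m;
    return std::find(support_dim_per_subquantizer.begin(), support_dim_per_subquantizer.end(),
                     dim_per_subquantizer) != support_dim_per_subquantizer.end();
}

int main() {
    std::cout << PqSplitSupported(128, 8) << std::endl;  // 1: 128 / 8 = 16 is supported
    std::cout << PqSplitSupported(100, 8) << std::endl;  // 0: 100 is not divisible by 8
    std::cout << PqSplitSupported(128, 9) << std::endl;  // 0: 128 is not divisible by 9
    return 0;
}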
@@ -59,6 +59,29 @@ TEST_F(EngineTest, FACTORY_TEST) {

ASSERT_TRUE(engine_ptr != nullptr);
}

{
auto engine_ptr = milvus::engine::EngineFactory::Build(
512, "/tmp/milvus_index_1", milvus::engine::EngineType::FAISS_PQ, milvus::engine::MetricType::IP, 1024);

ASSERT_TRUE(engine_ptr != nullptr);
}

{
auto engine_ptr = milvus::engine::EngineFactory::Build(
512, "/tmp/milvus_index_1", milvus::engine::EngineType::SPTAG_KDT,
milvus::engine::MetricType::L2, 1024);

ASSERT_TRUE(engine_ptr != nullptr);
}

{
auto engine_ptr = milvus::engine::EngineFactory::Build(
512, "/tmp/milvus_index_1", milvus::engine::EngineType::SPTAG_KDT,
milvus::engine::MetricType::L2, 1024);

ASSERT_TRUE(engine_ptr != nullptr);
}
}

TEST_F(EngineTest, ENGINE_IMPL_TEST) {
@@ -69,7 +92,7 @@ TEST_F(EngineTest, ENGINE_IMPL_TEST) {

std::vector<float> data;
std::vector<int64_t> ids;
const int row_count = 10000;
const int row_count = 500;
data.reserve(row_count * dimension);
ids.reserve(row_count);
for (int64_t i = 0; i < row_count; i++) {
@@ -95,5 +118,8 @@ TEST_F(EngineTest, ENGINE_IMPL_TEST) {
// ASSERT_TRUE(status.ok());

auto engine_build = engine_ptr->BuildIndex("/tmp/milvus_index_2", milvus::engine::EngineType::FAISS_IVFSQ8);
engine_build = engine_ptr->BuildIndex("/tmp/milvus_index_3", milvus::engine::EngineType::FAISS_PQ);
engine_build = engine_ptr->BuildIndex("/tmp/milvus_index_4", milvus::engine::EngineType::SPTAG_KDT);
engine_build = engine_ptr->BuildIndex("/tmp/milvus_index_5", milvus::engine::EngineType::SPTAG_BKT);
// ASSERT_TRUE(status.ok());
}
@@ -28,6 +28,8 @@ namespace {
static const char* VALID_CONFIG_STR =
"# Default values are used when you make no changes to the following parameters.\n"
"\n"
"version: 0.1"
"\n"
"server_config:\n"
" address: 0.0.0.0 # milvus server ip address (IPv4)\n"
" port: 19530 # port range: 1025 ~ 65534\n"
@@ -182,6 +182,7 @@ TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) {
TEST(whatever, test_config) {
milvus::engine::TempMetaConf conf;
conf.nprobe = 16;
conf.dim = 128;
auto nsg_conf = std::make_shared<milvus::engine::NSGConfAdapter>();
nsg_conf->Match(conf);
nsg_conf->MatchSearch(conf, milvus::engine::IndexType::NSG_MIX);