Merge remote-tracking branch 'upstream/branch-0.3.1' into branch-0.3.1
Former-commit-id: 24485d8457a8bfaaaf858d2aae5ae441456b1bc1
commit 5d714dfb99
@@ -15,3 +15,5 @@ Please mark all change in change log and use the ticket from JIRA.
 ### Task
 
 - MS-1 - Add CHANGELOG.md
+- MS-161 - Add CI / CD Module to Milvus Project
+- MS-202 - Add Milvus Jenkins project email notification
10  ci/function/file_transfer.groovy  Normal file
@@ -0,0 +1,10 @@
def FileTransfer (sourceFiles, remoteDirectory, remoteIP, protocol = "ftp", makeEmptyDirs = true) {
  if (protocol == "ftp") {
    ftpPublisher masterNodeName: '', paramPublish: [parameterName: ''], alwaysPublishFromMaster: false, continueOnError: false, failOnError: true, publishers: [
      [configName: "${remoteIP}", transfers: [
        [asciiMode: false, cleanRemote: false, excludes: '', flatten: false, makeEmptyDirs: "${makeEmptyDirs}", noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: "${remoteDirectory}", remoteDirectorySDF: false, removePrefix: '', sourceFiles: "${sourceFiles}"]], usePromotionTimestamp: true, useWorkspaceInPromotion: false, verbose: true
      ]
    ]
  }
}
return this
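Note: the packaging and upload scripts later in this diff consume this helper through Jenkins' load step. A minimal usage sketch follows; the artifact name and remote directory are illustrative, and 'nas storage' is assumed to be a Publish-over-FTP server name configured on the Jenkins master:

    // Sketch: load the shared helper from the checked-out workspace,
    // then push an artifact to the configured FTP target.
    // The arguments below are illustrative, not part of this commit.
    def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
    fileTransfer.FileTransfer("${PROJECT_NAME}-engine.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')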
13  ci/jenkinsfile/cleanup_dev.groovy  Normal file
@@ -0,0 +1,13 @@
try {
  def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
  if (!result) {
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
  }
} catch (exc) {
  def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
  if (!result) {
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
  }
  throw exc
}
13  ci/jenkinsfile/cluster_cleanup_dev.groovy  Normal file
@@ -0,0 +1,13 @@
try {
  def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
  if (!result) {
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
  }
} catch (exc) {
  def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
  if (!result) {
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
  }
  throw exc
}
22  ci/jenkinsfile/cluster_deploy2dev.groovy  Normal file
@@ -0,0 +1,22 @@
try {
  sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
  sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
  sh 'helm repo update'
  dir ("milvus-helm") {
    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
    dir ("milvus/milvus-cluster") {
      sh "helm install --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.3.1 ."
    }
  }
  timeout(time: 2, unit: 'MINUTES') {
    waitUntil {
      def result = sh script: "nc -z -w 3 ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local 19530", returnStatus: true
      return !result
    }
  }
} catch (exc) {
  echo 'Helm running failed!'
  sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
  throw exc
}
12  ci/jenkinsfile/cluster_dev_test.groovy  Normal file
@@ -0,0 +1,12 @@
timeout(time: 10, unit: 'MINUTES') {
  try {
    dir ("${PROJECT_NAME}_test") {
      checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
      sh 'python3 -m pip install -r requirements.txt'
      sh "pytest . --alluredir=cluster_test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local"
    }
  } catch (exc) {
    echo 'Milvus Test Failed !'
    throw exc
  }
}
22  ci/jenkinsfile/deploy2dev.groovy  Normal file
@@ -0,0 +1,22 @@
try {
  sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
  sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
  sh 'helm repo update'
  dir ("milvus-helm") {
    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
    dir ("milvus/milvus-gpu") {
      sh "helm install --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} --version 0.3.1 ."
    }
  }
  timeout(time: 2, unit: 'MINUTES') {
    waitUntil {
      def result = sh script: "nc -z -w 3 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.kube-opt.svc.cluster.local 19530", returnStatus: true
      return !result
    }
  }
} catch (exc) {
  echo 'Helm running failed!'
  sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
  throw exc
}
12  ci/jenkinsfile/dev_test.groovy  Normal file
@@ -0,0 +1,12 @@
timeout(time: 10, unit: 'MINUTES') {
  try {
    dir ("${PROJECT_NAME}_test") {
      checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
      sh 'python3 -m pip install -r requirements.txt'
      sh "pytest . --alluredir=test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.kube-opt.svc.cluster.local"
    }
  } catch (exc) {
    echo 'Milvus Test Failed !'
    throw exc
  }
}
20  ci/jenkinsfile/milvus_build.groovy  Normal file
@@ -0,0 +1,20 @@
container('milvus-build-env') {
  timeout(time: 20, unit: 'MINUTES') {
    gitlabCommitStatus(name: 'Build Engine') {
      dir ("milvus_engine") {
        try {
          checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption', disableSubmodules: false, parentCredentials: true, recursiveSubmodules: true, reference: '', trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
          dir ("cpp") {
            sh "git config --global user.email \"test@zilliz.com\""
            sh "git config --global user.name \"test\""
            sh "./build.sh -t ${params.BUILD_TYPE} -u -c"
          }
        } catch (exc) {
          updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
          throw exc
        }
      }
    }
  }
}
20  ci/jenkinsfile/milvus_build_no_ut.groovy  Normal file
@@ -0,0 +1,20 @@
container('milvus-build-env') {
  timeout(time: 20, unit: 'MINUTES') {
    gitlabCommitStatus(name: 'Build Engine') {
      dir ("milvus_engine") {
        try {
          checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption', disableSubmodules: false, parentCredentials: true, recursiveSubmodules: true, reference: '', trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
          dir ("cpp") {
            sh "git config --global user.email \"test@zilliz.com\""
            sh "git config --global user.name \"test\""
            sh "./build.sh -t ${params.BUILD_TYPE}"
          }
        } catch (exc) {
          updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
          throw exc
        }
      }
    }
  }
}
15  ci/jenkinsfile/notify.groovy  Normal file
@@ -0,0 +1,15 @@
def notify() {
  if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
    // Send an email only if the build status has changed from green/unstable to red
    emailext subject: '$DEFAULT_SUBJECT',
      body: '$DEFAULT_CONTENT',
      recipientProviders: [
        [$class: 'DevelopersRecipientProvider'],
        [$class: 'RequesterRecipientProvider']
      ],
      replyTo: '$DEFAULT_REPLYTO',
      to: '$DEFAULT_RECIPIENTS'
  }
}
return this
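Note: nothing in this diff wires notify.groovy into a pipeline yet; a caller would load and invoke it, for example from a post block (the wiring below is illustrative, not part of this commit):

    // Sketch (assumed wiring): run the shared e-mail notifier.
    def notifier = load "${env.WORKSPACE}/ci/jenkinsfile/notify.groovy"
    notifier.notify()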
44  ci/jenkinsfile/packaged_milvus.groovy  Normal file
@@ -0,0 +1,44 @@
container('milvus-build-env') {
  timeout(time: 5, unit: 'MINUTES') {
    dir ("milvus_engine") {
      dir ("cpp") {
        gitlabCommitStatus(name: 'Packaged Engine') {
          if (fileExists('milvus')) {
            try {
              sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
              def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
              fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
              if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
              }
            } catch (exc) {
              updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
              throw exc
            }
          } else {
            updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
            error("Milvus binary directory doesn't exist!")
          }
        }

        gitlabCommitStatus(name: 'Packaged Engine lcov') {
          if (fileExists('lcov_out')) {
            try {
              def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
              fileTransfer.FileTransfer("lcov_out/", "${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}", 'nas storage')
              if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus lcov out Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}/lcov_out/\""
              }
            } catch (exc) {
              updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
              throw exc
            }
          } else {
            updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
            error("Milvus lcov out directory doesn't exist!")
          }
        }
      }
    }
  }
}
26  ci/jenkinsfile/packaged_milvus_no_ut.groovy  Normal file
@@ -0,0 +1,26 @@
container('milvus-build-env') {
  timeout(time: 5, unit: 'MINUTES') {
    dir ("milvus_engine") {
      dir ("cpp") {
        gitlabCommitStatus(name: 'Packaged Engine') {
          if (fileExists('milvus')) {
            try {
              sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
              def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
              fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
              if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
              }
            } catch (exc) {
              updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
              throw exc
            }
          } else {
            updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
            error("Milvus binary directory doesn't exist!")
          }
        }
      }
    }
  }
}
32  ci/jenkinsfile/publish_docker.groovy  Normal file
@@ -0,0 +1,32 @@
container('publish-docker') {
  timeout(time: 15, unit: 'MINUTES') {
    gitlabCommitStatus(name: 'Publish Engine Docker') {
      try {
        dir ("${PROJECT_NAME}_build") {
          checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
          dir ("docker/deploy/ubuntu16.04/free_version") {
            sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
            sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
            try {
              docker.withRegistry('https://registry.zilliz.com', 'a54e38ef-c424-4ea9-9224-b25fc20e3924') {
                def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
                customImage.push()
              }
              echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
            } catch (exc) {
              updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
              throw exc
            } finally {
              sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
            }
          }
        }
      } catch (exc) {
        updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
        echo 'Publish docker failed!'
        throw exc
      }
    }
  }
}
14  ci/jenkinsfile/upload_dev_cluster_test_out.groovy  Normal file
@@ -0,0 +1,14 @@
timeout(time: 5, unit: 'MINUTES') {
  dir ("${PROJECT_NAME}_test") {
    if (fileExists('cluster_test_out')) {
      def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
      fileTransfer.FileTransfer("cluster_test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
      if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
        echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
      }
    } else {
      error("Milvus Dev Test Out directory doesn't exist!")
    }
  }
}
13  ci/jenkinsfile/upload_dev_test_out.groovy  Normal file
@@ -0,0 +1,13 @@
timeout(time: 5, unit: 'MINUTES') {
  dir ("${PROJECT_NAME}_test") {
    if (fileExists('test_out')) {
      def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
      fileTransfer.FileTransfer("test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
      if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
        echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
      }
    } else {
      error("Milvus Dev Test Out directory doesn't exist!")
    }
  }
}
365  ci/main_jenkinsfile  Normal file
@@ -0,0 +1,365 @@
pipeline {
  agent none

  options {
    timestamps()
  }

  environment {
    PROJECT_NAME = "milvus"
    LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
    SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
    GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
    SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
    DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
  }

  stages {
    stage("Ubuntu 16.04") {
      environment {
        PACKAGE_VERSION = VersionNumber([
          versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
        ]);

        DOCKER_VERSION = VersionNumber([
          versionNumberString : '${DOCKER_VERSION_STR}'
        ]);
      }

      stages {
        stage("Run Build") {
          agent {
            kubernetes {
              cloud 'build-kubernetes'
              label 'build'
              defaultContainer 'jnlp'
              containerTemplate {
                name 'milvus-build-env'
                image 'registry.zilliz.com/milvus/milvus-build-env:v0.10'
                ttyEnabled true
                command 'cat'
              }
            }
          }
          stages {
            stage('Build') {
              steps {
                gitlabCommitStatus(name: 'Build') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
                    load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                echo "Milvus Build aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'failed'
                echo "Milvus Build failure !"
              }
            }
          }
        }

        stage("Publish docker and helm") {
          agent {
            kubernetes {
              label 'publish'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
            }
          }
          stages {
            stage('Publish Docker') {
              steps {
                gitlabCommitStatus(name: 'Publish Docker') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                echo "Milvus Publish Docker aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                echo "Milvus Publish Docker failure !"
              }
            }
          }
        }

        stage("Deploy to Development") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Single Node CI/CD failure !"
                  }
                }
              }
            }

            stage("Cluster") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }
              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Cluster CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Cluster CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Cluster CI/CD failure !"
                  }
                }
              }
            }
          }
        }
      }
    }
  }

  post {
    always {
      script {
        if (env.gitlabAfter != null) {
          if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
            // Send an email only if the build status has changed from green/unstable to red
            emailext subject: '$DEFAULT_SUBJECT',
              body: '$DEFAULT_CONTENT',
              recipientProviders: [
                [$class: 'DevelopersRecipientProvider'],
                [$class: 'RequesterRecipientProvider']
              ],
              replyTo: '$DEFAULT_REPLYTO',
              to: '$DEFAULT_RECIPIENTS'
          }
        }
      }
    }

    success {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'success'
        echo "Milvus CI/CD success !"
      }
    }

    aborted {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
        echo "Milvus CI/CD aborted !"
      }
    }

    failure {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
        echo "Milvus CI/CD failure !"
      }
    }
  }
}
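Note: a worked example of the version-string logic above, using this merge's former commit id as the assumed push commit (branch and build type are also assumed values, for illustration only):

    // Assumed trigger values — a sketch, not part of this commit.
    def gitlabAfter = '24485d8457a8bfaaaf858d2aae5ae441456b1bc1'
    def SEMVER = 'branch-0.3.1'
    def LOWER_BUILD_TYPE = 'release'
    def SUFFIX_VERSION_NAME = gitlabAfter.substring(0, 6)   // '24485d'
    def DOCKER_VERSION_STR = gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"
    assert DOCKER_VERSION_STR == 'branch-0.3.1-release-24485d'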
365  ci/main_jenkinsfile_no_ut  Normal file
@@ -0,0 +1,365 @@
pipeline {
  agent none

  options {
    timestamps()
  }

  environment {
    PROJECT_NAME = "milvus"
    LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
    SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
    GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
    SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
    DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
  }

  stages {
    stage("Ubuntu 16.04") {
      environment {
        PACKAGE_VERSION = VersionNumber([
          versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
        ]);

        DOCKER_VERSION = VersionNumber([
          versionNumberString : '${DOCKER_VERSION_STR}'
        ]);
      }

      stages {
        stage("Run Build") {
          agent {
            kubernetes {
              cloud 'build-kubernetes'
              label 'build'
              defaultContainer 'jnlp'
              containerTemplate {
                name 'milvus-build-env'
                image 'registry.zilliz.com/milvus/milvus-build-env:v0.10'
                ttyEnabled true
                command 'cat'
              }
            }
          }
          stages {
            stage('Build') {
              steps {
                gitlabCommitStatus(name: 'Build') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build_no_ut.groovy"
                    load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus_no_ut.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                echo "Milvus Build aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'failed'
                echo "Milvus Build failure !"
              }
            }
          }
        }

        stage("Publish docker and helm") {
          agent {
            kubernetes {
              label 'publish'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
            }
          }
          stages {
            stage('Publish Docker') {
              steps {
                gitlabCommitStatus(name: 'Publish Docker') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                echo "Milvus Publish Docker aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                echo "Milvus Publish Docker failure !"
              }
            }
          }
        }

        stage("Deploy to Development") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Single Node CI/CD failure !"
                  }
                }
              }
            }

            stage("Cluster") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }
              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Cluster CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Cluster CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Cluster CI/CD failure !"
                  }
                }
              }
            }
          }
        }
      }
    }
  }

  post {
    always {
      script {
        if (env.gitlabAfter != null) {
          if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
            // Send an email only if the build status has changed from green/unstable to red
            emailext subject: '$DEFAULT_SUBJECT',
              body: '$DEFAULT_CONTENT',
              recipientProviders: [
                [$class: 'DevelopersRecipientProvider'],
                [$class: 'RequesterRecipientProvider']
              ],
              replyTo: '$DEFAULT_REPLYTO',
              to: '$DEFAULT_RECIPIENTS'
          }
        }
      }
    }

    success {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'success'
        echo "Milvus CI/CD success !"
      }
    }

    aborted {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
        echo "Milvus CI/CD aborted !"
      }
    }

    failure {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
        echo "Milvus CI/CD failure !"
      }
    }
  }
}
363  ci/nightly_main_jenkinsfile  Normal file
@@ -0,0 +1,363 @@
pipeline {
  agent none

  options {
    timestamps()
  }

  environment {
    PROJECT_NAME = "milvus"
    LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
    SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
    GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
    SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
    DOCKER_VERSION_STR = "${env.gitlabAfter == null ? '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, \"yyyyMMdd\"}' : '${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}'}"
  }

  stages {
    stage("Ubuntu 16.04") {
      environment {
        PACKAGE_VERSION = VersionNumber([
          versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
        ]);

        DOCKER_VERSION = VersionNumber([
          versionNumberString : '${DOCKER_VERSION_STR}'
        ]);
      }

      stages {
        stage("Run Build") {
          agent {
            kubernetes {
              cloud 'build-kubernetes'
              label 'build'
              defaultContainer 'jnlp'
              containerTemplate {
                name 'milvus-build-env'
                image 'registry.zilliz.com/milvus/milvus-build-env:v0.10'
                ttyEnabled true
                command 'cat'
              }
            }
          }
          stages {
            stage('Build') {
              steps {
                gitlabCommitStatus(name: 'Build') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
                    load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                echo "Milvus Build aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'failed'
                echo "Milvus Build failure !"
              }
            }
          }
        }

        stage("Publish docker and helm") {
          agent {
            kubernetes {
              label 'publish'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
            }
          }
          stages {
            stage('Publish Docker') {
              steps {
                gitlabCommitStatus(name: 'Publish Docker') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                echo "Milvus Publish Docker aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                echo "Milvus Publish Docker failure !"
              }
            }
          }
        }

        stage("Deploy to Development") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Single Node CI/CD failure !"
                  }
                }
              }
            }

            stage("Cluster") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }
              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Cluster CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Cluster CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Cluster CI/CD failure !"
                  }
                }
              }
            }
          }
        }
      }
    }
  }

  post {
    always {
      script {
        if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
          // Send an email only if the build status has changed from green/unstable to red
          emailext subject: '$DEFAULT_SUBJECT',
            body: '$DEFAULT_CONTENT',
            recipientProviders: [
              [$class: 'DevelopersRecipientProvider'],
              [$class: 'RequesterRecipientProvider']
            ],
            replyTo: '$DEFAULT_REPLYTO',
            to: '$DEFAULT_RECIPIENTS'
        }
      }
    }

    success {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'success'
        echo "Milvus CI/CD success !"
      }
    }

    aborted {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
        echo "Milvus CI/CD aborted !"
      }
    }

    failure {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
        echo "Milvus CI/CD failure !"
      }
    }
  }
}
13  ci/pod_containers/milvus-engine-build.yaml  Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.9
    command:
    - cat
    tty: true
13  ci/pod_containers/milvus-testframework.yaml  Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: testframework
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.1
    command:
    - cat
    tty: true
22  ci/pod_containers/publish-docker.yaml  Normal file
@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
@@ -11,13 +11,21 @@ Please mark all change in change log and use the ticket from JIRA.
 - MS-149 - Fixed searching only one index file issue in distributed mode
 - MS-153 - fix c_str error when connecting to MySQL
 - MS-157 - fix changelog
+- MS-190 - use env variable to switch mem manager and fix cmake
 
 ## Improvement
 - MS-156 - Add unittest for merge result functions
 - MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl
+- MS-204 - Support multi db_path
+- MS-206 - Support SQ8 index type
+- MS-208 - Add buildindex interface for C++ SDK
+- MS-212 - Support Inner product metric type
+
+## New Feature
+- MS-195 - Add nlist and use_blas_threshold conf
 
 ## New Feature
 - MS-180 - Add new mem manager
 
 ## Task
 
@@ -72,6 +80,7 @@ Please mark all change in change log and use the ticket from JIRA.
 - MS-144 - Add nprobe config
 - MS-147 - Enable IVF
+- MS-130 - Add prometheus_test
 
 ## Task
 - MS-74 - Change README.md in cpp
 - MS-88 - Add support for arm architecture
@@ -525,7 +525,7 @@ if(MILVUS_BOOST_VENDORED)
                     ""
                     ${EP_LOG_OPTIONS})
     set(Boost_INCLUDE_DIR "${BOOST_PREFIX}")
-    set(Boost_INCLUDE_DIRS "${BOOST_INCLUDE_DIR}")
+    set(Boost_INCLUDE_DIRS "${Boost_INCLUDE_DIR}")
     add_dependencies(boost_system_static boost_ep)
     add_dependencies(boost_filesystem_static boost_ep)
     add_dependencies(boost_serialization_static boost_ep)
@@ -6,6 +6,7 @@ server_config:
 
 db_config:
     db_path: @MILVUS_DB_PATH@           # milvus data storage path
+    db_slave_path:                      # secondary data storage path, split by semicolon
 
     # URI format: dialect://username:password@host:port/database
     # All parts except dialect are optional, but you MUST include the delimiters
@@ -15,6 +16,8 @@ db_config:
     index_building_threshold: 1024      # index building trigger threshold, default: 1024, unit: MB
     archive_disk_threshold: 512         # trigger archive action if storage size exceed this value, unit: GB
     archive_days_threshold: 30          # files older than x days will be archived, unit: day
+    maximum_memory: 4                   # maximum memory allowed, default: 4, unit: GB, should be at least 1 GB.
+                                        # the sum of maximum_memory and cpu_cache_capacity should be less than total memory
 
 metric_config:
     is_startup: off                     # if monitoring start: on, off
@@ -33,4 +36,7 @@ cache_config:                           # cache configure
     cpu_cache_capacity: 16              # how many memory are used as cache, unit: GB, range: 0 ~ less than total memory
 
 engine_config:
-    nprobe: 10
+    nprobe: 10
+    nlist: 16384
+    use_blas_threshold: 20
+    metric_type: L2                     # L2 or Inner Product
@@ -13,6 +13,27 @@ DIR_LCOV_OUTPUT="lcov_out"
 
 DIR_GCNO="cmake_build"
 DIR_UNITTEST="milvus/bin"
 
+MYSQL_USER_NAME=root
+MYSQL_PASSWORD=Fantast1c
+MYSQL_HOST='192.168.1.194'
+MYSQL_PORT='3306'
+
+MYSQL_DB_NAME=milvus_`date +%s%N`
+
+function mysql_exc()
+{
+    cmd=$1
+    mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
+    if [ $? -ne 0 ]; then
+        echo "mysql $cmd run failed"
+    fi
+}
+
+mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
+mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
+mysql_exc "FLUSH PRIVILEGES;"
+mysql_exc "USE ${MYSQL_DB_NAME};"
+
 MYSQL_USER_NAME=root
 MYSQL_PASSWORD=Fantast1c
23  cpp/src/db/Constants.h  Normal file
@@ -0,0 +1,23 @@
/*******************************************************************************
 * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
 * Unauthorized copying of this file, via any medium is strictly prohibited.
 * Proprietary and confidential.
 ******************************************************************************/
#pragma once

namespace zilliz {
namespace milvus {
namespace engine {

const size_t K = 1024UL;
const size_t M = K * K;
const size_t G = K * M;
const size_t T = K * G;

const size_t MAX_TABLE_FILE_MEM = 128 * M;

const int VECTOR_TYPE_SIZE = sizeof(float);

} // namespace engine
} // namespace milvus
} // namespace zilliz
@@ -44,6 +44,8 @@ public:
 
     virtual Status Size(uint64_t& result) = 0;
 
+    virtual Status BuildIndex(const std::string& table_id) = 0;
+
     virtual Status DropAll() = 0;
 
     DB() = default;
@@ -87,8 +87,7 @@ DBImpl::DBImpl(const Options& options)
         compact_thread_pool_(1, 1),
         index_thread_pool_(1, 1) {
     meta_ptr_ = DBMetaImplFactory::Build(options.meta, options.mode);
-    mem_mgr_ = std::make_shared<MemManager>(meta_ptr_, options_);
-    // mem_mgr_ = (MemManagerPtr)(new MemManager(meta_ptr_, options_));
+    mem_mgr_ = MemManagerFactory::Build(meta_ptr_, options_);
     if (options.mode != Options::MODE::READ_ONLY) {
         StartTimerTasks();
     }
@@ -408,10 +407,10 @@ void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
     meta_ptr_->CleanUpFilesWithTTL(ttl);
 }
 
-void DBImpl::StartBuildIndexTask() {
+void DBImpl::StartBuildIndexTask(bool force) {
     static uint64_t index_clock_tick = 0;
     index_clock_tick++;
-    if(index_clock_tick%INDEX_ACTION_INTERVAL != 0) {
+    if(!force && (index_clock_tick%INDEX_ACTION_INTERVAL != 0)) {
         return;
     }
 
@@ -430,6 +429,23 @@ void DBImpl::StartBuildIndexTask() {
     }
 }
 
+Status DBImpl::BuildIndex(const std::string& table_id) {
+    bool has = false;
+    meta_ptr_->HasNonIndexFiles(table_id, has);
+    int times = 1;
+
+    while (has) {
+        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
+        meta_ptr_->UpdateTableFilesToIndex(table_id);
+        /* StartBuildIndexTask(true); */
+        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10*1000, times*100)));
+        meta_ptr_->HasNonIndexFiles(table_id, has);
+        times++;
+    }
+    return Status::OK();
+    /* return BuildIndexByTable(table_id); */
+}
+
 Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
     ExecutionEnginePtr to_index = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_);
     if(to_index == nullptr) {
@@ -444,6 +460,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
     meta::TableFileSchema table_file;
     table_file.table_id_ = file.table_id_;
     table_file.date_ = file.date_;
+    table_file.file_type_ = meta::TableFileSchema::INDEX; //for multi-db-path, distribute index file averagely to each path
     Status status = meta_ptr_->CreateTableFile(table_file);
     if (!status.ok()) {
         return status;
@@ -469,7 +486,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
 
     //step 6: update meta
     table_file.file_type_ = meta::TableFileSchema::INDEX;
-    table_file.size_ = index->Size();
+    table_file.size_ = index->PhysicalSize();
 
     auto to_remove = file;
     to_remove.file_type_ = meta::TableFileSchema::TO_DELETE;
@@ -485,13 +502,35 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
         //index->Cache();
 
     } catch (std::exception& ex) {
-        return Status::Error("Build index encounter exception", ex.what());
+        std::string msg = "Build index encounter exception" + std::string(ex.what());
+        ENGINE_LOG_ERROR << msg;
+        return Status::Error(msg);
     }
 
     return Status::OK();
 }
 
+Status DBImpl::BuildIndexByTable(const std::string& table_id) {
+    std::unique_lock<std::mutex> lock(build_index_mutex_);
+    meta::TableFilesSchema to_index_files;
+    meta_ptr_->FilesToIndex(to_index_files);
+
+    Status status;
+
+    for (auto& file : to_index_files) {
+        status = BuildIndex(file);
+        if (!status.ok()) {
+            ENGINE_LOG_ERROR << "Building index for " << file.id_ << " failed: " << status.ToString();
+            return status;
+        }
+        ENGINE_LOG_DEBUG << "Sync building index for " << file.id_ << " passed";
+    }
+
+    return status;
+}
+
 void DBImpl::BackgroundBuildIndex() {
+    std::unique_lock<std::mutex> lock(build_index_mutex_);
     meta::TableFilesSchema to_index_files;
     meta_ptr_->FilesToIndex(to_index_files);
     Status status;
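Note on the polling loop in the new BuildIndex(table_id) above: each iteration sleeps min(10*1000, times*100) milliseconds, so the wait grows linearly — 0.1 s, 0.2 s, 0.3 s, … — and reaches the 10 s cap on the 100th poll (roughly 505 s of cumulative sleep by that point), after which it stays at 10 s per poll.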
@@ -9,6 +9,7 @@
 #include "MemManager.h"
 #include "Types.h"
 #include "utils/ThreadPool.h"
+#include "MemManagerAbstract.h"
 
 #include <mutex>
 #include <condition_variable>
@@ -33,7 +34,6 @@ class Meta;
 
 class DBImpl : public DB {
  public:
     using MetaPtr = meta::Meta::Ptr;
-    using MemManagerPtr = typename MemManager::Ptr;
 
     explicit DBImpl(const Options &options);
@@ -82,6 +82,8 @@ class DBImpl : public DB {
 
     Status Size(uint64_t &result) override;
 
+    Status BuildIndex(const std::string& table_id) override;
+
     ~DBImpl() override;
 
  private:
@@ -107,9 +109,11 @@ class DBImpl : public DB {
     Status BackgroundMergeFiles(const std::string &table_id);
     void BackgroundCompaction(std::set<std::string> table_ids);
 
-    void StartBuildIndexTask();
+    void StartBuildIndexTask(bool force=false);
     void BackgroundBuildIndex();
 
+    Status
+    BuildIndexByTable(const std::string& table_id);
     Status
     BuildIndex(const meta::TableFileSchema &);
@@ -123,7 +127,7 @@ class DBImpl : public DB {
     std::thread bg_timer_thread_;
 
     MetaPtr meta_ptr_;
-    MemManagerPtr mem_mgr_;
+    MemManagerAbstractPtr mem_mgr_;
 
     server::ThreadPool compact_thread_pool_;
     std::list<std::future<void>> compact_thread_results_;
@@ -132,6 +136,8 @@ class DBImpl : public DB {
     server::ThreadPool index_thread_pool_;
     std::list<std::future<void>> index_thread_results_;
 
+    std::mutex build_index_mutex_;
+
 }; // DBImpl
@ -83,26 +83,6 @@ using ConnectorT = decltype(StoragePrototype(""));
static std::unique_ptr<ConnectorT> ConnectorPtr;
using ConditionT = decltype(c(&TableFileSchema::id_) == 1UL);

std::string DBMetaImpl::GetTablePath(const std::string &table_id) {
return options_.path + "/tables/" + table_id;
}

std::string DBMetaImpl::GetTableDatePartitionPath(const std::string &table_id, DateT &date) {
std::stringstream ss;
ss << GetTablePath(table_id) << "/" << date;
return ss.str();
}

void DBMetaImpl::GetTableFilePath(TableFileSchema &group_file) {
if (group_file.date_ == EmptyDate) {
group_file.date_ = Meta::GetDate();
}
std::stringstream ss;
ss << GetTableDatePartitionPath(group_file.table_id_, group_file.date_)
<< "/" << group_file.file_id_;
group_file.location_ = ss.str();
}

Status DBMetaImpl::NextTableId(std::string &table_id) {
std::stringstream ss;
SimpleIDGenerator g;
@ -196,7 +176,8 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
if(TableSchema::TO_DELETE == std::get<0>(table[0])) {
return Status::Error("Table already exists and it is in delete state, please wait a second");
} else {
return Status::OK();//table already exists, no error
// Change from no error to already exist.
return Status::AlreadyExist("Table already exists");
}
}
}
@ -212,15 +193,7 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
return Status::DBTransactionError("Add Table Error");
}

auto table_path = GetTablePath(table_schema.table_id_);
table_schema.location_ = table_path;
if (!boost::filesystem::is_directory(table_path)) {
auto ret = boost::filesystem::create_directories(table_path);
if (!ret) {
ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
return Status::Error("Failed to create table path");
}
}
return utils::CreateTablePath(options_, table_schema.table_id_);

} catch (std::exception &e) {
return HandleException("Encounter exception when create table", e);
@ -306,9 +279,6 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
return Status::NotFound("Table " + table_schema.table_id_ + " not found");
}

auto table_path = GetTablePath(table_schema.table_id_);
table_schema.location_ = table_path;

} catch (std::exception &e) {
return HandleException("Encounter exception when describe table", e);
}
@ -316,6 +286,28 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
return Status::OK();
}

Status DBMetaImpl::HasNonIndexFiles(const std::string& table_id, bool& has) {
has = false;
try {
auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_),
where((c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW
or
c(&TableFileSchema::file_type_) == (int) TableFileSchema::TO_INDEX)
and c(&TableFileSchema::table_id_) == table_id
));

if (selected.size() >= 1) {
has = true;
} else {
has = false;
}

} catch (std::exception &e) {
return HandleException("Encounter exception when check non index files", e);
}
return Status::OK();
}

Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
has_or_not = false;

@ -388,20 +380,11 @@ Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
file_schema.created_on_ = utils::GetMicroSecTimeStamp();
file_schema.updated_time_ = file_schema.created_on_;
file_schema.engine_type_ = table_schema.engine_type_;
GetTableFilePath(file_schema);

auto id = ConnectorPtr->insert(file_schema);
file_schema.id_ = id;

auto partition_path = GetTableDatePartitionPath(file_schema.table_id_, file_schema.date_);

if (!boost::filesystem::is_directory(partition_path)) {
auto ret = boost::filesystem::create_directory(partition_path);
if (!ret) {
ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
return Status::DBTransactionError("Failed to create partition directory");
}
}
return utils::CreateTableFilePath(options_, file_schema);

} catch (std::exception& ex) {
return HandleException("Encounter exception when create table file", ex);
@ -438,7 +421,7 @@ Status DBMetaImpl::FilesToIndex(TableFilesSchema &files) {
table_file.date_ = std::get<5>(file);
table_file.engine_type_ = std::get<6>(file);

GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);
auto groupItr = groups.find(table_file.table_id_);
if (groupItr == groups.end()) {
TableSchema table_schema;
@ -501,7 +484,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
table_file.date_ = std::get<5>(file);
table_file.engine_type_ = std::get<6>(file);
table_file.dimension_ = table_schema.dimension_;
GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);
auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
@ -543,7 +526,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
table_file.date_ = std::get<5>(file);
table_file.engine_type_ = std::get<6>(file);
table_file.dimension_ = table_schema.dimension_;
GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);
auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
@ -593,7 +576,7 @@ Status DBMetaImpl::FilesToMerge(const std::string &table_id,
table_file.size_ = std::get<4>(file);
table_file.date_ = std::get<5>(file);
table_file.dimension_ = table_schema.dimension_;
GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);
auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
@ -639,7 +622,7 @@ Status DBMetaImpl::GetTableFiles(const std::string& table_id,
file_schema.date_ = std::get<4>(file);
file_schema.engine_type_ = std::get<5>(file);
file_schema.dimension_ = table_schema.dimension_;
GetTableFilePath(file_schema);
utils::GetTableFilePath(options_, file_schema);

table_files.emplace_back(file_schema);
}
@ -791,6 +774,23 @@ Status DBMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
return Status::OK();
}

Status DBMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) {
try {
ConnectorPtr->update_all(
set(
c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_INDEX
),
where(
c(&TableFileSchema::table_id_) == table_id and
c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW
));
} catch (std::exception &e) {
return HandleException("Encounter exception when update table files to to_index", e);
}

return Status::OK();
}

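A minimal sketch of how these two new meta calls pair up in an index-building path (caller-side only; the function name, polling loop, and sleep interval are illustrative, not lifted from DBImpl):

// Sketch: drive a full-table index build through the new meta interface.
// Assumes the engine headers above plus <thread> and <chrono>.
Status BuildIndexByTableSketch(const meta::Meta::Ptr &meta, const std::string &table_id) {
    auto status = meta->UpdateTableFilesToIndex(table_id);  // flip remaining RAW files to TO_INDEX
    if (!status.ok()) return status;

    bool has = true;
    while (has) {  // wait until no RAW/TO_INDEX files are left for this table
        status = meta->HasNonIndexFiles(table_id, has);
        if (!status.ok()) return status;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    return Status::OK();
}
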
Status DBMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
try {
MetricCollector metric;
@ -855,10 +855,9 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
table_file.table_id_ = std::get<1>(file);
table_file.file_id_ = std::get<2>(file);
table_file.date_ = std::get<3>(file);
GetTableFilePath(table_file);

utils::DeleteTableFilePath(options_, table_file);
ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = " << table_file.location_ << std::endl;
boost::filesystem::remove(table_file.location_);
ConnectorPtr->remove<TableFileSchema>(table_file.id_);

}
@ -882,10 +881,7 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {

auto commited = ConnectorPtr->transaction([&]() mutable {
for (auto &table : tables) {
auto table_path = GetTablePath(std::get<1>(table));

ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
boost::filesystem::remove_all(table_path);
utils::DeleteTablePath(options_, std::get<1>(table));
ConnectorPtr->remove<TableSchema>(std::get<0>(table));
}


@ -35,6 +35,10 @@ public:
const std::vector<size_t>& ids,
TableFilesSchema& table_files) override;

virtual Status HasNonIndexFiles(const std::string& table_id, bool& has) override;

virtual Status UpdateTableFilesToIndex(const std::string& table_id) override;

virtual Status UpdateTableFile(TableFileSchema& file_schema) override;

virtual Status UpdateTableFiles(TableFilesSchema& files) override;
@ -66,9 +70,6 @@ private:
Status NextFileId(std::string& file_id);
Status NextTableId(std::string& table_id);
Status DiscardFiles(long to_discard_size);
std::string GetTablePath(const std::string& table_id);
std::string GetTableDatePartitionPath(const std::string& table_id, DateT& date);
void GetTableFilePath(TableFileSchema& group_file);
Status Initialize();

const DBMetaOptions options_;

@ -22,13 +22,19 @@ EngineFactory::Build(uint16_t dimension,
switch (type) {
case EngineType::FAISS_IDMAP: {
execution_engine_ptr =
ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, "IDMap", "IDMap,Flat"));
ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, BUILD_INDEX_TYPE_IDMAP, "IDMap,Flat"));
break;
}

case EngineType::FAISS_IVFFLAT: {
execution_engine_ptr =
ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, "IVF", "IDMap,Flat"));
ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, BUILD_INDEX_TYPE_IVF, "IDMap,Flat"));
break;
}

case EngineType::FAISS_IVFSQ8: {
execution_engine_ptr =
ExecutionEnginePtr(new FaissExecutionEngine(dimension, location, BUILD_INDEX_TYPE_IVFSQ8, "IDMap,Flat"));
break;
}

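With the new enum value wired through the factory (next hunk), an SQ8 engine is requested the same way as the other types. A minimal sketch (the dimension and location values are illustrative):

// Sketch: request an IVFSQ8 execution engine through the factory.
ExecutionEnginePtr engine = EngineFactory::Build(/*dimension=*/512,
                                                 /*location=*/"/tmp/tables/t1/20190707/1",
                                                 EngineType::FAISS_IVFSQ8);
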
@ -18,6 +18,7 @@ enum class EngineType {
INVALID = 0,
FAISS_IDMAP = 1,
FAISS_IVFFLAT,
FAISS_IVFSQ8,
};

class ExecutionEngine {

@ -3,10 +3,14 @@
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>

#include "Factories.h"
#include "DBImpl.h"
#include "MemManager.h"
#include "NewMemManager.h"
#include "Exception.h"

#include <stdlib.h>
#include <time.h>
#include <sstream>
#include <iostream>
@ -14,7 +18,9 @@
#include <assert.h>
#include <easylogging++.h>
#include <regex>
#include "Exception.h"
#include <cstdlib>
#include <string>
#include <algorithm>

namespace zilliz {
namespace milvus {
@ -72,17 +78,14 @@ std::shared_ptr<meta::Meta> DBMetaImplFactory::Build(const DBMetaOptions& metaOp
if (dialect.find("mysql") != std::string::npos) {
ENGINE_LOG_INFO << "Using MySQL";
return std::make_shared<meta::MySQLMetaImpl>(meta::MySQLMetaImpl(metaOptions, mode));
}
else if (dialect.find("sqlite") != std::string::npos) {
} else if (dialect.find("sqlite") != std::string::npos) {
ENGINE_LOG_INFO << "Using SQLite";
return std::make_shared<meta::DBMetaImpl>(meta::DBMetaImpl(metaOptions));
}
else {
} else {
ENGINE_LOG_ERROR << "Invalid dialect in URI: dialect = " << dialect;
throw InvalidArgumentException("URI dialect is not mysql / sqlite");
}
}
else {
} else {
ENGINE_LOG_ERROR << "Wrong URI format: URI = " << uri;
throw InvalidArgumentException("Wrong URI format ");
}
@ -98,6 +101,21 @@ DB* DBFactory::Build(const Options& options) {
return new DBImpl(options);
}

MemManagerAbstractPtr MemManagerFactory::Build(const std::shared_ptr<meta::Meta>& meta,
const Options& options) {
if (const char* env = getenv("MILVUS_USE_OLD_MEM_MANAGER")) {
std::string env_str = env;
std::transform(env_str.begin(), env_str.end(), env_str.begin(), ::toupper);
if (env_str == "ON") {
return std::make_shared<MemManager>(meta, options);
}
else {
return std::make_shared<NewMemManager>(meta, options);
}
}
return std::make_shared<NewMemManager>(meta, options);
}

} // namespace engine
} // namespace milvus
} // namespace zilliz

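The old MemManager is now opt-in via an environment variable; any value other than "ON" (case-insensitive), or an unset variable, selects NewMemManager. A minimal sketch of the switch, assuming an Options instance the caller already has (setenv is POSIX):

// Sketch: force the legacy MemManager for one process, then build through the factory.
#include <cstdlib>
setenv("MILVUS_USE_OLD_MEM_MANAGER", "on", 1);   // upper-cased to "ON" inside Build()
auto meta = DBMetaImplFactory::Build(options.meta, options.mode);
MemManagerAbstractPtr mem_mgr = MemManagerFactory::Build(meta, options);
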
@ -10,16 +10,18 @@
#include "MySQLMetaImpl.h"
#include "Options.h"
#include "ExecutionEngine.h"
#include "MemManagerAbstract.h"

#include <string>
#include <memory>


namespace zilliz {
namespace milvus {
namespace engine {

struct DBMetaOptionsFactory {
static DBMetaOptions Build(const std::string& path = "");
static DBMetaOptions Build(const std::string &path = "");
};

struct OptionsFactory {
@ -28,12 +30,16 @@ struct OptionsFactory {

struct DBMetaImplFactory {
static std::shared_ptr<meta::DBMetaImpl> Build();
static std::shared_ptr<meta::Meta> Build(const DBMetaOptions& metaOptions, const int& mode);
static std::shared_ptr<meta::Meta> Build(const DBMetaOptions &metaOptions, const int &mode);
};

struct DBFactory {
static std::shared_ptr<DB> Build();
static DB* Build(const Options&);
static DB *Build(const Options &);
};

struct MemManagerFactory {
static MemManagerAbstractPtr Build(const std::shared_ptr<meta::Meta> &meta, const Options &options);
};

} // namespace engine

@ -5,6 +5,7 @@
******************************************************************************/
#include "FaissExecutionEngine.h"
#include "Log.h"
#include "utils/CommonUtil.h"

#include <faiss/AutoTune.h>
#include <faiss/MetaIndexes.h>
@ -21,15 +22,25 @@ namespace zilliz {
namespace milvus {
namespace engine {

namespace {
std::string GetMetricType() {
server::ServerConfig &config = server::ServerConfig::GetInstance();
server::ConfigNode engine_config = config.GetConfig(server::CONFIG_ENGINE);
return engine_config.GetValue(server::CONFIG_METRICTYPE, "L2");
}
}

FaissExecutionEngine::FaissExecutionEngine(uint16_t dimension,
const std::string& location,
const std::string& build_index_type,
const std::string& raw_index_type)
: pIndex_(faiss::index_factory(dimension, raw_index_type.c_str())),
location_(location),
: location_(location),
build_index_type_(build_index_type),
raw_index_type_(raw_index_type) {

std::string metric_type = GetMetricType();
faiss::MetricType faiss_metric_type = (metric_type == "L2") ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
pIndex_.reset(faiss::index_factory(dimension, raw_index_type.c_str(), faiss_metric_type));
}

FaissExecutionEngine::FaissExecutionEngine(std::shared_ptr<faiss::Index> index,
@ -60,7 +71,7 @@ size_t FaissExecutionEngine::Dimension() const {
}

size_t FaissExecutionEngine::PhysicalSize() const {
return (size_t)(Count() * pIndex_->d)*sizeof(float);
return server::CommonUtil::GetFileSize(location_);
}

Status FaissExecutionEngine::Serialize() {
@ -118,6 +129,7 @@ FaissExecutionEngine::BuildIndex(const std::string& location) {
auto opd = std::make_shared<Operand>();
opd->d = pIndex_->d;
opd->index_type = build_index_type_;
opd->metric_type = GetMetricType();
IndexBuilderPtr pBuilder = GetIndexBuilder(opd);

auto from_index = dynamic_cast<faiss::IndexIDMap*>(pIndex_.get());
@ -161,14 +173,16 @@ Status FaissExecutionEngine::Cache() {

Status FaissExecutionEngine::Init() {

if(build_index_type_ == "IVF") {
if(build_index_type_ == BUILD_INDEX_TYPE_IVF ||
build_index_type_ == BUILD_INDEX_TYPE_IVFSQ8) {

using namespace zilliz::milvus::server;
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
nprobe_ = engine_config.GetInt32Value(CONFIG_NPROBE, 1000);
nlist_ = engine_config.GetInt32Value(CONFIG_NLIST,16384);

} else if(build_index_type_ == "IDMap") {
} else if(build_index_type_ == BUILD_INDEX_TYPE_IDMAP) {
;
} else {
return Status::Error("Wrong index type: ", build_index_type_);

@ -15,6 +15,9 @@ namespace zilliz {
namespace milvus {
namespace engine {

const static std::string BUILD_INDEX_TYPE_IDMAP = "IDMap";
const static std::string BUILD_INDEX_TYPE_IVF = "IVF";
const static std::string BUILD_INDEX_TYPE_IVFSQ8 = "IVFSQ8";

class FaissExecutionEngine : public ExecutionEngine {
public:
@ -65,6 +68,7 @@ protected:
std::string raw_index_type_;

size_t nprobe_ = 0;
size_t nlist_ = 0;
};


@ -8,28 +8,30 @@
#include "MetaConsts.h"
#include "EngineFactory.h"
#include "metrics/Metrics.h"
#include "Log.h"

#include <iostream>
#include <sstream>
#include <thread>
#include <easylogging++.h>


namespace zilliz {
namespace milvus {
namespace engine {

MemVectors::MemVectors(const std::shared_ptr<meta::Meta>& meta_ptr,
const meta::TableFileSchema& schema, const Options& options)
: meta_(meta_ptr),
options_(options),
schema_(schema),
id_generator_(new SimpleIDGenerator()),
active_engine_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) {
MemVectors::MemVectors(const std::shared_ptr<meta::Meta> &meta_ptr,
const meta::TableFileSchema &schema, const Options &options)
: meta_(meta_ptr),
options_(options),
schema_(schema),
id_generator_(new SimpleIDGenerator()),
active_engine_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType) schema_.engine_type_)) {
}


Status MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) {
if(active_engine_ == nullptr) {
Status MemVectors::Add(size_t n_, const float *vectors_, IDNumbers &vector_ids_) {
if (active_engine_ == nullptr) {
return Status::Error("index engine is null");
}

@ -38,13 +40,15 @@ Status MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_)
Status status = active_engine_->AddWithIds(n_, vectors_, vector_ids_.data());
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast<int>(n_), static_cast<int>(schema_.dimension_), total_time);
server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast<int>(n_),
static_cast<int>(schema_.dimension_),
total_time);

return status;
}

size_t MemVectors::RowCount() const {
if(active_engine_ == nullptr) {
if (active_engine_ == nullptr) {
return 0;
}

@ -52,15 +56,15 @@ size_t MemVectors::RowCount() const {
}

size_t MemVectors::Size() const {
if(active_engine_ == nullptr) {
if (active_engine_ == nullptr) {
return 0;
}

return active_engine_->Size();
}

Status MemVectors::Serialize(std::string& table_id) {
if(active_engine_ == nullptr) {
Status MemVectors::Serialize(std::string &table_id) {
if (active_engine_ == nullptr) {
return Status::Error("index engine is null");
}

@ -72,15 +76,16 @@ Status MemVectors::Serialize(std::string& table_id) {
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
schema_.size_ = size;

server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet(size/total_time);
server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet(size / total_time);

schema_.file_type_ = (size >= options_.index_trigger_size) ?
meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW;
meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW;

auto status = meta_->UpdateTableFile(schema_);

LOG(DEBUG) << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << schema_.file_id_ << " of size " << (double)(active_engine_->Size()) / (double)meta::M << " M";
<< " file " << schema_.file_id_ << " of size " << (double) (active_engine_->Size()) / (double) meta::M
<< " M";

active_engine_->Cache();

@ -98,7 +103,7 @@ MemVectors::~MemVectors() {
* MemManager
*/
MemManager::MemVectorsPtr MemManager::GetMemByTable(
const std::string& table_id) {
const std::string &table_id) {
auto memIt = mem_id_map_.find(table_id);
if (memIt != mem_id_map_.end()) {
return memIt->second;
@ -115,26 +120,31 @@ MemManager::MemVectorsPtr MemManager::GetMemByTable(
return mem_id_map_[table_id];
}

Status MemManager::InsertVectors(const std::string& table_id_,
size_t n_,
const float* vectors_,
IDNumbers& vector_ids_) {
Status MemManager::InsertVectors(const std::string &table_id_,
size_t n_,
const float *vectors_,
IDNumbers &vector_ids_) {

LOG(DEBUG) << "MemManager::InsertVectors: mutable mem = " << GetCurrentMutableMem() <<
", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem();

std::unique_lock<std::mutex> lock(mutex_);

return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_);
}

Status MemManager::InsertVectorsNoLock(const std::string& table_id,
size_t n,
const float* vectors,
IDNumbers& vector_ids) {
Status MemManager::InsertVectorsNoLock(const std::string &table_id,
size_t n,
const float *vectors,
IDNumbers &vector_ids) {

MemVectorsPtr mem = GetMemByTable(table_id);
if (mem == nullptr) {
return Status::NotFound("Group " + table_id + " not found!");
}

//makesure each file size less than index_trigger_size
if(mem->Size() > options_.index_trigger_size) {
if (mem->Size() > options_.index_trigger_size) {
std::unique_lock<std::mutex> lock(serialization_mtx_);
immu_mem_list_.push_back(mem);
mem_id_map_.erase(table_id);
@ -147,8 +157,8 @@ Status MemManager::InsertVectorsNoLock(const std::string& table_id,
Status MemManager::ToImmutable() {
std::unique_lock<std::mutex> lock(mutex_);
MemIdMap temp_map;
for (auto& kv: mem_id_map_) {
if(kv.second->RowCount() == 0) {
for (auto &kv: mem_id_map_) {
if (kv.second->RowCount() == 0) {
temp_map.insert(kv);
continue;//empty vector, no need to serialize
}
@ -159,12 +169,12 @@ Status MemManager::ToImmutable() {
return Status::OK();
}

Status MemManager::Serialize(std::set<std::string>& table_ids) {
Status MemManager::Serialize(std::set<std::string> &table_ids) {
ToImmutable();
std::unique_lock<std::mutex> lock(serialization_mtx_);
std::string table_id;
table_ids.clear();
for (auto& mem : immu_mem_list_) {
for (auto &mem : immu_mem_list_) {
mem->Serialize(table_id);
table_ids.insert(table_id);
}
@ -172,7 +182,7 @@ Status MemManager::Serialize(std::set<std::string>& table_ids) {
return Status::OK();
}

Status MemManager::EraseMemVector(const std::string& table_id) {
Status MemManager::EraseMemVector(const std::string &table_id) {
{//erase MemVector from rapid-insert cache
std::unique_lock<std::mutex> lock(mutex_);
mem_id_map_.erase(table_id);
@ -181,8 +191,8 @@ Status MemManager::EraseMemVector(const std::string& table_id) {
{//erase MemVector from serialize cache
std::unique_lock<std::mutex> lock(serialization_mtx_);
MemList temp_list;
for (auto& mem : immu_mem_list_) {
if(mem->TableId() != table_id) {
for (auto &mem : immu_mem_list_) {
if (mem->TableId() != table_id) {
temp_list.push_back(mem);
}
}
@ -192,6 +202,26 @@ Status MemManager::EraseMemVector(const std::string& table_id) {
return Status::OK();
}

size_t MemManager::GetCurrentMutableMem() {
size_t totalMem = 0;
for (auto &kv : mem_id_map_) {
auto memVector = kv.second;
totalMem += memVector->Size();
}
return totalMem;
}

size_t MemManager::GetCurrentImmutableMem() {
size_t totalMem = 0;
for (auto &memVector : immu_mem_list_) {
totalMem += memVector->Size();
}
return totalMem;
}

size_t MemManager::GetCurrentMem() {
return GetCurrentMutableMem() + GetCurrentImmutableMem();
}

} // namespace engine
} // namespace milvus

@ -9,6 +9,7 @@
#include "IDGenerator.h"
#include "Status.h"
#include "Meta.h"
#include "MemManagerAbstract.h"

#include <map>
#include <string>
@ -17,72 +18,79 @@
#include <mutex>
#include <set>


namespace zilliz {
namespace milvus {
namespace engine {

namespace meta {
class Meta;
class Meta;
}

class MemVectors {
public:
public:
using MetaPtr = meta::Meta::Ptr;
using Ptr = std::shared_ptr<MemVectors>;

explicit MemVectors(const std::shared_ptr<meta::Meta>&,
const meta::TableFileSchema&, const Options&);
explicit MemVectors(const std::shared_ptr<meta::Meta> &,
const meta::TableFileSchema &, const Options &);

Status Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_);
Status Add(size_t n_, const float *vectors_, IDNumbers &vector_ids_);

size_t RowCount() const;

size_t Size() const;

Status Serialize(std::string& table_id);
Status Serialize(std::string &table_id);

~MemVectors();

const std::string& Location() const { return schema_.location_; }
const std::string &Location() const { return schema_.location_; }

std::string TableId() const { return schema_.table_id_; }

private:
private:
MemVectors() = delete;
MemVectors(const MemVectors&) = delete;
MemVectors& operator=(const MemVectors&) = delete;
MemVectors(const MemVectors &) = delete;
MemVectors &operator=(const MemVectors &) = delete;

MetaPtr meta_;
Options options_;
meta::TableFileSchema schema_;
IDGenerator* id_generator_;
IDGenerator *id_generator_;
ExecutionEnginePtr active_engine_;

}; // MemVectors



class MemManager {
public:
class MemManager : public MemManagerAbstract {
public:
using MetaPtr = meta::Meta::Ptr;
using MemVectorsPtr = typename MemVectors::Ptr;
using Ptr = std::shared_ptr<MemManager>;

MemManager(const std::shared_ptr<meta::Meta>& meta, const Options& options)
MemManager(const std::shared_ptr<meta::Meta> &meta, const Options &options)
: meta_(meta), options_(options) {}

MemVectorsPtr GetMemByTable(const std::string& table_id);
Status InsertVectors(const std::string &table_id,
size_t n, const float *vectors, IDNumbers &vector_ids) override;

Status InsertVectors(const std::string& table_id,
size_t n, const float* vectors, IDNumbers& vector_ids);
Status Serialize(std::set<std::string> &table_ids) override;

Status Serialize(std::set<std::string>& table_ids);
Status EraseMemVector(const std::string &table_id) override;

Status EraseMemVector(const std::string& table_id);
size_t GetCurrentMutableMem() override;

private:
Status InsertVectorsNoLock(const std::string& table_id,
size_t n, const float* vectors, IDNumbers& vector_ids);
size_t GetCurrentImmutableMem() override;

size_t GetCurrentMem() override;

private:
MemVectorsPtr GetMemByTable(const std::string &table_id);

Status InsertVectorsNoLock(const std::string &table_id,
size_t n, const float *vectors, IDNumbers &vector_ids);
Status ToImmutable();

using MemIdMap = std::map<std::string, MemVectorsPtr>;

32
cpp/src/db/MemManagerAbstract.h
Normal file
@ -0,0 +1,32 @@
#pragma once

#include <set>


namespace zilliz {
namespace milvus {
namespace engine {

class MemManagerAbstract {
public:

virtual Status InsertVectors(const std::string &table_id,
size_t n, const float *vectors, IDNumbers &vector_ids) = 0;

virtual Status Serialize(std::set<std::string> &table_ids) = 0;

virtual Status EraseMemVector(const std::string &table_id) = 0;

virtual size_t GetCurrentMutableMem() = 0;

virtual size_t GetCurrentImmutableMem() = 0;

virtual size_t GetCurrentMem() = 0;

}; // MemManagerAbstract

using MemManagerAbstractPtr = std::shared_ptr<MemManagerAbstract>;

} // namespace engine
} // namespace milvus
} // namespace zilliz
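DBImpl holds its manager through this interface (the MemManagerAbstractPtr mem_mgr_ member earlier in this commit), so either implementation can sit behind the same call sites. A minimal sketch of a polymorphic call site, assuming a manager built by MemManagerFactory:

// Sketch: both MemManager and NewMemManager satisfy this interface.
MemManagerAbstractPtr mem_mgr = MemManagerFactory::Build(meta, options);
std::set<std::string> updated_tables;
mem_mgr->Serialize(updated_tables);  // flush immutable buffers, collect affected table ids
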
88
cpp/src/db/MemTable.cpp
Normal file
@ -0,0 +1,88 @@
#include "MemTable.h"
#include "Log.h"


namespace zilliz {
namespace milvus {
namespace engine {

MemTable::MemTable(const std::string &table_id,
const std::shared_ptr<meta::Meta> &meta,
const Options &options) :
table_id_(table_id),
meta_(meta),
options_(options) {

}

Status MemTable::Add(VectorSource::Ptr &source) {

while (!source->AllAdded()) {

MemTableFile::Ptr current_mem_table_file;
if (!mem_table_file_list_.empty()) {
current_mem_table_file = mem_table_file_list_.back();
}

Status status;
if (mem_table_file_list_.empty() || current_mem_table_file->IsFull()) {
MemTableFile::Ptr new_mem_table_file = std::make_shared<MemTableFile>(table_id_, meta_, options_);
status = new_mem_table_file->Add(source);
if (status.ok()) {
mem_table_file_list_.emplace_back(new_mem_table_file);
}
} else {
status = current_mem_table_file->Add(source);
}

if (!status.ok()) {
std::string err_msg = "MemTable::Add failed: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
return Status::Error(err_msg);
}
}
return Status::OK();
}

void MemTable::GetCurrentMemTableFile(MemTableFile::Ptr &mem_table_file) {
mem_table_file = mem_table_file_list_.back();
}

size_t MemTable::GetTableFileCount() {
return mem_table_file_list_.size();
}

Status MemTable::Serialize() {
for (auto mem_table_file = mem_table_file_list_.begin(); mem_table_file != mem_table_file_list_.end();) {
auto status = (*mem_table_file)->Serialize();
if (!status.ok()) {
std::string err_msg = "MemTable::Serialize failed: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
return Status::Error(err_msg);
}
std::lock_guard<std::mutex> lock(mutex_);
mem_table_file = mem_table_file_list_.erase(mem_table_file);
}
return Status::OK();
}

bool MemTable::Empty() {
return mem_table_file_list_.empty();
}

const std::string &MemTable::GetTableId() const {
return table_id_;
}

size_t MemTable::GetCurrentMem() {
std::lock_guard<std::mutex> lock(mutex_);
size_t total_mem = 0;
for (auto &mem_table_file : mem_table_file_list_) {
total_mem += mem_table_file->GetCurrentMem();
}
return total_mem;
}

} // namespace engine
} // namespace milvus
} // namespace zilliz
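The intended flow, end to end: wrap the raw vectors in a VectorSource, let MemTable::Add spill into fresh MemTableFiles as each fills, then flush. A minimal sketch (the table id is illustrative; meta and options are whatever the caller already has):

// Sketch of the new insert path introduced by this commit.
VectorSource::Ptr source = std::make_shared<VectorSource>(n, vectors);
MemTable mem_table("my_table", meta, options);
auto status = mem_table.Add(source);   // loops until source->AllAdded()
if (status.ok()) {
    status = mem_table.Serialize();    // flush and drop every MemTableFile
}
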
53
cpp/src/db/MemTable.h
Normal file
@ -0,0 +1,53 @@
#pragma once

#include "Status.h"
#include "MemTableFile.h"
#include "VectorSource.h"

#include <mutex>


namespace zilliz {
namespace milvus {
namespace engine {

class MemTable {

public:

using Ptr = std::shared_ptr<MemTable>;
using MemTableFileList = std::vector<MemTableFile::Ptr>;
using MetaPtr = meta::Meta::Ptr;

MemTable(const std::string &table_id, const std::shared_ptr<meta::Meta> &meta, const Options &options);

Status Add(VectorSource::Ptr &source);

void GetCurrentMemTableFile(MemTableFile::Ptr &mem_table_file);

size_t GetTableFileCount();

Status Serialize();

bool Empty();

const std::string &GetTableId() const;

size_t GetCurrentMem();

private:
const std::string table_id_;

MemTableFileList mem_table_file_list_;

MetaPtr meta_;

Options options_;

std::mutex mutex_;

}; //MemTable

} // namespace engine
} // namespace milvus
} // namespace zilliz
108
cpp/src/db/MemTableFile.cpp
Normal file
@ -0,0 +1,108 @@
#include "MemTableFile.h"
#include "Constants.h"
#include "Log.h"
#include "EngineFactory.h"
#include "metrics/Metrics.h"

#include <cmath>


namespace zilliz {
namespace milvus {
namespace engine {

MemTableFile::MemTableFile(const std::string &table_id,
const std::shared_ptr<meta::Meta> &meta,
const Options &options) :
table_id_(table_id),
meta_(meta),
options_(options) {

current_mem_ = 0;
auto status = CreateTableFile();
if (status.ok()) {
execution_engine_ = EngineFactory::Build(table_file_schema_.dimension_,
table_file_schema_.location_,
(EngineType) table_file_schema_.engine_type_);
}
}

Status MemTableFile::CreateTableFile() {

meta::TableFileSchema table_file_schema;
table_file_schema.table_id_ = table_id_;
auto status = meta_->CreateTableFile(table_file_schema);
if (status.ok()) {
table_file_schema_ = table_file_schema;
} else {
std::string err_msg = "MemTableFile::CreateTableFile failed: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
}
return status;
}

Status MemTableFile::Add(const VectorSource::Ptr &source) {

if (table_file_schema_.dimension_ <= 0) {
std::string err_msg = "MemTableFile::Add: table_file_schema dimension = " +
std::to_string(table_file_schema_.dimension_) + ", table_id = " + table_file_schema_.table_id_;
ENGINE_LOG_ERROR << err_msg;
return Status::Error(err_msg);
}

size_t single_vector_mem_size = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE;
size_t mem_left = GetMemLeft();
if (mem_left >= single_vector_mem_size) {
size_t num_vectors_to_add = std::ceil(mem_left / single_vector_mem_size);
size_t num_vectors_added;
auto status = source->Add(execution_engine_, table_file_schema_, num_vectors_to_add, num_vectors_added);
if (status.ok()) {
current_mem_ += (num_vectors_added * single_vector_mem_size);
}
return status;
}
return Status::OK();
}

size_t MemTableFile::GetCurrentMem() {
return current_mem_;
}

size_t MemTableFile::GetMemLeft() {
return (MAX_TABLE_FILE_MEM - current_mem_);
}

bool MemTableFile::IsFull() {
size_t single_vector_mem_size = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE;
return (GetMemLeft() < single_vector_mem_size);
}

Status MemTableFile::Serialize() {

auto start_time = METRICS_NOW_TIME;

auto size = GetCurrentMem();

execution_engine_->Serialize();
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
table_file_schema_.size_ = size;

server::Metrics::GetInstance().DiskStoreIOSpeedGaugeSet((double) size / total_time);

table_file_schema_.file_type_ = (size >= options_.index_trigger_size) ?
meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW;

auto status = meta_->UpdateTableFile(table_file_schema_);

LOG(DEBUG) << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << (double) size / (double) M << " M";

execution_engine_->Cache();

return status;
}

} // namespace engine
} // namespace milvus
} // namespace zilliz
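The capacity math above in one worked example (the constants' values here are assumptions, not read from Constants.h): with dimension = 512 and VECTOR_TYPE_SIZE = sizeof(float) = 4, single_vector_mem_size is 512 * 4 = 2048 bytes; if MAX_TABLE_FILE_MEM were 128 MB, one MemTableFile would accept 134217728 / 2048 = 65536 vectors before IsFull() turns true. Note also that mem_left / single_vector_mem_size is integer division between two size_t values, so the std::ceil in Add is effectively a floor.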
52
cpp/src/db/MemTableFile.h
Normal file
@ -0,0 +1,52 @@
#pragma once

#include "Status.h"
#include "Meta.h"
#include "VectorSource.h"
#include "ExecutionEngine.h"


namespace zilliz {
namespace milvus {
namespace engine {

class MemTableFile {

public:

using Ptr = std::shared_ptr<MemTableFile>;
using MetaPtr = meta::Meta::Ptr;

MemTableFile(const std::string &table_id, const std::shared_ptr<meta::Meta> &meta, const Options &options);

Status Add(const VectorSource::Ptr &source);

size_t GetCurrentMem();

size_t GetMemLeft();

bool IsFull();

Status Serialize();

private:

Status CreateTableFile();

const std::string table_id_;

meta::TableFileSchema table_file_schema_;

MetaPtr meta_;

Options options_;

size_t current_mem_;

ExecutionEnginePtr execution_engine_;

}; //MemTableFile

} // namespace engine
} // namespace milvus
} // namespace zilliz
@ -39,6 +39,8 @@ public:
const std::vector<size_t>& ids,
TableFilesSchema& table_files) = 0;

virtual Status UpdateTableFilesToIndex(const std::string& table_id) = 0;

virtual Status UpdateTableFile(TableFileSchema& file_schema) = 0;

virtual Status UpdateTableFiles(TableFilesSchema& files) = 0;
@ -56,6 +58,8 @@ public:

virtual Status FilesToIndex(TableFilesSchema&) = 0;

virtual Status HasNonIndexFiles(const std::string& table_id, bool& has) = 0;

virtual Status CleanUp() = 0;
virtual Status CleanUpFilesWithTTL(uint16_t) = 0;


@ -31,7 +31,6 @@ struct TableSchema {
int state_ = (int)NORMAL;
size_t files_cnt_ = 0;
uint16_t dimension_ = 0;
std::string location_;
long created_on_ = 0;
int engine_type_ = (int)EngineType::FAISS_IDMAP;
bool store_raw_data_ = false;

@ -67,26 +67,6 @@ namespace meta {

}

std::string MySQLMetaImpl::GetTablePath(const std::string &table_id) {
return options_.path + "/tables/" + table_id;
}

std::string MySQLMetaImpl::GetTableDatePartitionPath(const std::string &table_id, DateT &date) {
std::stringstream ss;
ss << GetTablePath(table_id) << "/" << date;
return ss.str();
}

void MySQLMetaImpl::GetTableFilePath(TableFileSchema &group_file) {
if (group_file.date_ == EmptyDate) {
group_file.date_ = Meta::GetDate();
}
std::stringstream ss;
ss << GetTableDatePartitionPath(group_file.table_id_, group_file.date_)
<< "/" << group_file.file_id_;
group_file.location_ = ss.str();
}

Status MySQLMetaImpl::NextTableId(std::string &table_id) {
std::stringstream ss;
SimpleIDGenerator g;
@ -412,15 +392,8 @@ namespace meta {
// auto total_time = METRICS_MICROSECONDS(start_time, end_time);
// server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);

auto table_path = GetTablePath(table_schema.table_id_);
table_schema.location_ = table_path;
if (!boost::filesystem::is_directory(table_path)) {
auto ret = boost::filesystem::create_directories(table_path);
if (!ret) {
ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
return Status::Error("Failed to create table path");
}
}
return utils::CreateTablePath(options_, table_schema.table_id_);

} catch (const BadQuery& er) {
// Handle any query errors
ENGINE_LOG_ERROR << "QUERY ERROR WHEN ADDING TABLE" << ": " << er.what();
@ -436,6 +409,11 @@ namespace meta {
return Status::OK();
}

Status MySQLMetaImpl::HasNonIndexFiles(const std::string& table_id, bool& has) {
// TODO
return Status::OK();
}

Status MySQLMetaImpl::DeleteTable(const std::string& table_id) {

// std::lock_guard<std::recursive_mutex> lock(mysql_mutex);
@ -583,9 +561,6 @@ namespace meta {
return Status::NotFound("Table " + table_schema.table_id_ + " not found");
}

auto table_path = GetTablePath(table_schema.table_id_);
table_schema.location_ = table_path;

} catch (const BadQuery& er) {
// Handle any query errors
ENGINE_LOG_ERROR << "QUERY ERROR WHEN DESCRIBING TABLE" << ": " << er.what();
@ -738,7 +713,7 @@ namespace meta {
file_schema.created_on_ = utils::GetMicroSecTimeStamp();
file_schema.updated_time_ = file_schema.created_on_;
file_schema.engine_type_ = table_schema.engine_type_;
GetTableFilePath(file_schema);
utils::GetTableFilePath(options_, file_schema);

std::string id = "NULL"; //auto-increment
std::string table_id = file_schema.table_id_;

@ -783,15 +758,7 @@ namespace meta {
}
} // Scoped Connection

auto partition_path = GetTableDatePartitionPath(file_schema.table_id_, file_schema.date_);

if (!boost::filesystem::is_directory(partition_path)) {
auto ret = boost::filesystem::create_directory(partition_path);
if (!ret) {
ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
return Status::DBTransactionError("Failed to create partition directory");
}
}
return utils::CreateTableFilePath(options_, file_schema);

} catch (const BadQuery& er) {
// Handle any query errors
@ -876,7 +843,7 @@ namespace meta {
}
table_file.dimension_ = groups[table_file.table_id_].dimension_;

GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);

files.push_back(table_file);
}
@ -987,7 +954,7 @@ namespace meta {

table_file.dimension_ = table_schema.dimension_;

GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);

auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
@ -1073,7 +1040,7 @@ namespace meta {

table_file.dimension_ = table_schema.dimension_;

GetTableFilePath(table_file);
utils::GetTableFilePath(options_, table_file);

auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
@ -1168,7 +1135,7 @@ namespace meta {

file_schema.dimension_ = table_schema.dimension_;

GetTableFilePath(file_schema);
utils::GetTableFilePath(options_, file_schema);

table_files.emplace_back(file_schema);
}
@ -1479,6 +1446,11 @@ namespace meta {
return Status::OK();
}

Status MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) {
// TODO
return Status::OK();
}

Status MySQLMetaImpl::UpdateTableFiles(TableFilesSchema &files) {

// std::lock_guard<std::recursive_mutex> lock(mysql_mutex);
@ -1623,11 +1595,10 @@ namespace meta {

table_file.date_ = resRow["date"];

GetTableFilePath(table_file);
utils::DeleteTableFilePath(options_, table_file);

ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = "
<< table_file.location_ << std::endl;
boost::filesystem::remove(table_file.location_);

idsToDelete.emplace_back(std::to_string(table_file.id_));
}
@ -1700,10 +1671,7 @@ namespace meta {
std::string table_id;
resRow["table_id"].to_string(table_id);

auto table_path = GetTablePath(table_id);

ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
boost::filesystem::remove_all(table_path);
utils::DeleteTablePath(options_, table_id);

idsToDeleteSS << "id = " << std::to_string(id) << " OR ";
}

@ -40,8 +40,12 @@ namespace meta {
const std::vector<size_t>& ids,
TableFilesSchema& table_files) override;

virtual Status HasNonIndexFiles(const std::string& table_id, bool& has) override;

virtual Status UpdateTableFile(TableFileSchema& file_schema) override;

virtual Status UpdateTableFilesToIndex(const std::string& table_id) override;

virtual Status UpdateTableFiles(TableFilesSchema& files) override;

virtual Status FilesToSearch(const std::string& table_id,
@ -71,9 +75,6 @@ namespace meta {
Status NextFileId(std::string& file_id);
Status NextTableId(std::string& table_id);
Status DiscardFiles(long long to_discard_size);
std::string GetTablePath(const std::string& table_id);
std::string GetTableDatePartitionPath(const std::string& table_id, DateT& date);
void GetTableFilePath(TableFileSchema& group_file);
Status Initialize();

const DBMetaOptions options_;

126
cpp/src/db/NewMemManager.cpp
Normal file
@ -0,0 +1,126 @@
#include "NewMemManager.h"
#include "VectorSource.h"
#include "Log.h"
#include "Constants.h"

#include <thread>


namespace zilliz {
namespace milvus {
namespace engine {

NewMemManager::MemTablePtr NewMemManager::GetMemByTable(const std::string &table_id) {
auto memIt = mem_id_map_.find(table_id);
if (memIt != mem_id_map_.end()) {
return memIt->second;
}

mem_id_map_[table_id] = std::make_shared<MemTable>(table_id, meta_, options_);
return mem_id_map_[table_id];
}

Status NewMemManager::InsertVectors(const std::string &table_id_,
size_t n_,
const float *vectors_,
IDNumbers &vector_ids_) {

while (GetCurrentMem() > options_.maximum_memory) {
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}

LOG(DEBUG) << "NewMemManager::InsertVectors: mutable mem = " << GetCurrentMutableMem() <<
", immutable mem = " << GetCurrentImmutableMem() << ", total mem = " << GetCurrentMem();

std::unique_lock<std::mutex> lock(mutex_);

return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_);
}

Status NewMemManager::InsertVectorsNoLock(const std::string &table_id,
size_t n,
const float *vectors,
IDNumbers &vector_ids) {

MemTablePtr mem = GetMemByTable(table_id);
VectorSource::Ptr source = std::make_shared<VectorSource>(n, vectors);

auto status = mem->Add(source);
if (status.ok()) {
vector_ids = source->GetVectorIds();
}
return status;
}

Status NewMemManager::ToImmutable() {
std::unique_lock<std::mutex> lock(mutex_);
MemIdMap temp_map;
for (auto &kv: mem_id_map_) {
if (kv.second->Empty()) {
//empty table, no need to serialize
temp_map.insert(kv);
} else {
immu_mem_list_.push_back(kv.second);
}
}

mem_id_map_.swap(temp_map);
return Status::OK();
}

Status NewMemManager::Serialize(std::set<std::string> &table_ids) {
ToImmutable();
std::unique_lock<std::mutex> lock(serialization_mtx_);
table_ids.clear();
for (auto &mem : immu_mem_list_) {
mem->Serialize();
table_ids.insert(mem->GetTableId());
}
immu_mem_list_.clear();
return Status::OK();
}

Status NewMemManager::EraseMemVector(const std::string &table_id) {
{//erase MemVector from rapid-insert cache
std::unique_lock<std::mutex> lock(mutex_);
mem_id_map_.erase(table_id);
}

{//erase MemVector from serialize cache
std::unique_lock<std::mutex> lock(serialization_mtx_);
MemList temp_list;
for (auto &mem : immu_mem_list_) {
if (mem->GetTableId() != table_id) {
temp_list.push_back(mem);
}
}
immu_mem_list_.swap(temp_list);
}

return Status::OK();
}

size_t NewMemManager::GetCurrentMutableMem() {
size_t total_mem = 0;
for (auto &kv : mem_id_map_) {
auto memTable = kv.second;
total_mem += memTable->GetCurrentMem();
}
return total_mem;
}

size_t NewMemManager::GetCurrentImmutableMem() {
size_t total_mem = 0;
for (auto &mem_table : immu_mem_list_) {
total_mem += mem_table->GetCurrentMem();
}
return total_mem;
}

size_t NewMemManager::GetCurrentMem() {
return GetCurrentMutableMem() + GetCurrentImmutableMem();
}

} // namespace engine
} // namespace milvus
} // namespace zilliz

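Unlike the old manager, NewMemManager::InsertVectors applies simple backpressure: it spins in 1 ms sleeps while total (mutable plus immutable) memory exceeds options_.maximum_memory, relying on a concurrent Serialize() to drain the immutable list. A minimal sketch (the override value is illustrative):

// Sketch: inserts block briefly once the 2 GB budget below is exceeded.
Options options;                        // maximum_memory defaults to 4 * ONE_GB
options.maximum_memory = 2 * ONE_GB;    // illustrative override
NewMemManager manager(meta, options);
IDNumbers ids;
auto status = manager.InsertVectors("my_table", n, vectors, ids);
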
61
cpp/src/db/NewMemManager.h
Normal file
@ -0,0 +1,61 @@
#pragma once

#include "Meta.h"
#include "MemTable.h"
#include "Status.h"
#include "MemManagerAbstract.h"

#include <map>
#include <string>
#include <ctime>
#include <memory>
#include <mutex>


namespace zilliz {
namespace milvus {
namespace engine {

class NewMemManager : public MemManagerAbstract {
public:
using MetaPtr = meta::Meta::Ptr;
using Ptr = std::shared_ptr<NewMemManager>;
using MemTablePtr = typename MemTable::Ptr;

NewMemManager(const std::shared_ptr<meta::Meta> &meta, const Options &options)
: meta_(meta), options_(options) {}

Status InsertVectors(const std::string &table_id,
size_t n, const float *vectors, IDNumbers &vector_ids) override;

Status Serialize(std::set<std::string> &table_ids) override;

Status EraseMemVector(const std::string &table_id) override;

size_t GetCurrentMutableMem() override;

size_t GetCurrentImmutableMem() override;

size_t GetCurrentMem() override;

private:
MemTablePtr GetMemByTable(const std::string &table_id);

Status InsertVectorsNoLock(const std::string &table_id,
size_t n, const float *vectors, IDNumbers &vector_ids);
Status ToImmutable();

using MemIdMap = std::map<std::string, MemTablePtr>;
using MemList = std::vector<MemTablePtr>;
MemIdMap mem_id_map_;
MemList immu_mem_list_;
MetaPtr meta_;
Options options_;
std::mutex mutex_;
std::mutex serialization_mtx_;
}; // NewMemManager


} // namespace engine
} // namespace milvus
} // namespace zilliz
@ -8,6 +8,7 @@
#include <string>
#include <memory>
#include <map>
#include <vector>

namespace zilliz {
namespace milvus {
@ -43,6 +44,7 @@ private:

struct DBMetaOptions {
std::string path;
std::vector<std::string> slave_paths;
std::string backend_uri;
ArchiveConf archive_conf = ArchiveConf("delete");
}; // DBMetaOptions
@ -61,6 +63,7 @@ struct Options {
size_t index_trigger_size = ONE_GB; //unit: byte
DBMetaOptions meta;
int mode = MODE::SINGLE;
float maximum_memory = 4 * ONE_GB;
}; // Options


@ -4,14 +4,58 @@
* Proprietary and confidential.
******************************************************************************/
#include "Utils.h"
#include "utils/CommonUtil.h"
#include "Log.h"

#include <mutex>
#include <chrono>
#include <boost/filesystem.hpp>

namespace zilliz {
namespace milvus {
namespace engine {
namespace utils {

namespace {

static const std::string TABLES_FOLDER = "/tables/";

static uint64_t index_file_counter = 0;
static std::mutex index_file_counter_mutex;

std::string ConstructParentFolder(const std::string& db_path, const meta::TableFileSchema& table_file) {
std::string table_path = db_path + TABLES_FOLDER + table_file.table_id_;
std::string partition_path = table_path + "/" + std::to_string(table_file.date_);
return partition_path;
}

std::string GetTableFileParentFolder(const DBMetaOptions& options, const meta::TableFileSchema& table_file) {
uint64_t path_count = options.slave_paths.size() + 1;
std::string target_path = options.path;
uint64_t index = 0;

if(meta::TableFileSchema::INDEX == table_file.file_type_) {
// index file is large file and to be persisted permanently
// we need to distribute index files to each db_path averagely
// round robin according to a file counter
std::lock_guard<std::mutex> lock(index_file_counter_mutex);
index = index_file_counter % path_count;
index_file_counter++;
} else {
// for other type files, they could be merged or deleted
// so we round robin according to their file id
index = table_file.id_ % path_count;
}

if (index > 0) {
target_path = options.slave_paths[index - 1];
}

return ConstructParentFolder(target_path, table_file);
}

}

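Path selection in one worked example (paths illustrative): with options.path = "/data0" and options.slave_paths = {"/data1", "/data2"}, path_count is 3. A RAW or TO_INDEX file with id_ = 7 maps to index 7 % 3 = 1, i.e. "/data1", while consecutive INDEX files rotate 0, 1, 2, 0, ... across all three roots regardless of their ids. Index 0 always means the primary options.path.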
long GetMicroSecTimeStamp() {
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
|
||||
@ -20,6 +64,82 @@ long GetMicroSecTimeStamp() {
|
||||
return micros;
|
||||
}
|
||||
|
||||
Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id) {
|
||||
std::string db_path = options.path;
|
||||
std::string table_path = db_path + TABLES_FOLDER + table_id;
|
||||
auto status = server::CommonUtil::CreateDirectory(table_path);
|
||||
if (status != 0) {
|
||||
ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
|
||||
return Status::Error("Failed to create table path");
|
||||
}
|
||||
|
||||
for(auto& path : options.slave_paths) {
|
||||
table_path = path + TABLES_FOLDER + table_id;
|
||||
status = server::CommonUtil::CreateDirectory(table_path);
|
||||
if (status != 0) {
|
||||
ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
|
||||
return Status::Error("Failed to create table path");
|
||||
}
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id) {
|
||||
std::string db_path = options.path;
|
||||
std::string table_path = db_path + TABLES_FOLDER + table_id;
|
||||
boost::filesystem::remove_all(table_path);
|
||||
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
|
||||
|
||||
for(auto& path : options.slave_paths) {
|
||||
table_path = path + TABLES_FOLDER + table_id;
|
||||
boost::filesystem::remove_all(table_path);
|
||||
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
|
||||
std::string parent_path = GetTableFileParentFolder(options, table_file);
|
||||
|
||||
auto status = server::CommonUtil::CreateDirectory(parent_path);
|
||||
if (status != 0) {
|
||||
ENGINE_LOG_ERROR << "Create directory " << parent_path << " Error";
|
||||
return Status::DBTransactionError("Failed to create partition directory");
|
||||
}
|
||||
|
||||
table_file.location_ = parent_path + "/" + table_file.file_id_;
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
|
||||
std::string parent_path = ConstructParentFolder(options.path, table_file);
|
||||
std::string file_path = parent_path + "/" + table_file.file_id_;
|
||||
if(boost::filesystem::exists(file_path)) {
|
||||
table_file.location_ = file_path;
|
||||
return Status::OK();
|
||||
} else {
|
||||
for(auto& path : options.slave_paths) {
|
||||
parent_path = ConstructParentFolder(path, table_file);
|
||||
file_path = parent_path + "/" + table_file.file_id_;
|
||||
if(boost::filesystem::exists(file_path)) {
|
||||
table_file.location_ = file_path;
|
||||
return Status::OK();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Status::Error("Table file doesn't exist: " + table_file.file_id_);
|
||||
}
|
||||
|
||||
Status DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
|
||||
utils::GetTableFilePath(options, table_file);
|
||||
boost::filesystem::remove(table_file.location_);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
} // namespace utils
|
||||
} // namespace engine
|
||||
} // namespace milvus
|
||||
|
||||
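The placement logic above rotates index files through options.path and every entry of options.slave_paths via a mutex-guarded counter, while mergeable files are keyed on their file id. A minimal self-contained sketch of that selection policy follows; the PickPath helper and the path names are illustrative assumptions, not part of this commit:

#include <cstdint>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

// Hypothetical helper mirroring GetTableFileParentFolder's selection logic.
// paths[0] plays the role of options.path; the rest act as slave paths.
std::string PickPath(const std::vector<std::string>& paths, bool is_index_file, uint64_t file_id) {
    static uint64_t counter = 0;
    static std::mutex counter_mutex;

    uint64_t index = 0;
    if (is_index_file) {
        // round robin: spread large, permanent index files evenly
        std::lock_guard<std::mutex> lock(counter_mutex);
        index = counter++ % paths.size();
    } else {
        // mergeable/deletable files: derive the slot from the file id
        index = file_id % paths.size();
    }
    return paths[index];
}

int main() {
    std::vector<std::string> paths = {"/data/db", "/mnt/disk1", "/mnt/disk2"};
    for (uint64_t id = 0; id < 6; id++) {
        std::cout << PickPath(paths, /*is_index_file=*/true, id) << "\n";
    }
    // prints /data/db, /mnt/disk1, /mnt/disk2, /data/db, ... in rotation
}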
@ -5,6 +5,10 @@
******************************************************************************/
#pragma once

#include "Options.h"
#include "MetaTypes.h"

#include <string>

namespace zilliz {
namespace milvus {
@ -13,6 +17,13 @@ namespace utils {

long GetMicroSecTimeStamp();

Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id);
Status DeleteTablePath(const DBMetaOptions& options, const std::string& table_id);

Status CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
Status GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
Status DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);

} // namespace utils
} // namespace engine
} // namespace milvus

67
cpp/src/db/VectorSource.cpp
Normal file
@ -0,0 +1,67 @@
#include "VectorSource.h"
#include "ExecutionEngine.h"
#include "EngineFactory.h"
#include "Log.h"
#include "metrics/Metrics.h"


namespace zilliz {
namespace milvus {
namespace engine {


VectorSource::VectorSource(const size_t &n,
                           const float *vectors) :
    n_(n),
    vectors_(vectors),
    id_generator_(new SimpleIDGenerator()) {
    current_num_vectors_added = 0;
}

Status VectorSource::Add(const ExecutionEnginePtr &execution_engine,
                         const meta::TableFileSchema &table_file_schema,
                         const size_t &num_vectors_to_add,
                         size_t &num_vectors_added) {

    auto start_time = METRICS_NOW_TIME;

    num_vectors_added = current_num_vectors_added + num_vectors_to_add <= n_ ?
                        num_vectors_to_add : n_ - current_num_vectors_added;
    IDNumbers vector_ids_to_add;
    id_generator_->GetNextIDNumbers(num_vectors_added, vector_ids_to_add);
    Status status = execution_engine->AddWithIds(num_vectors_added,
                                                 vectors_ + current_num_vectors_added * table_file_schema.dimension_,
                                                 vector_ids_to_add.data());
    if (status.ok()) {
        current_num_vectors_added += num_vectors_added;
        vector_ids_.insert(vector_ids_.end(),
                           std::make_move_iterator(vector_ids_to_add.begin()),
                           std::make_move_iterator(vector_ids_to_add.end()));
    } else {
        ENGINE_LOG_ERROR << "VectorSource::Add failed: " + status.ToString();
    }

    auto end_time = METRICS_NOW_TIME;
    auto total_time = METRICS_MICROSECONDS(start_time, end_time);
    server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast<int>(n_),
                                                               static_cast<int>(table_file_schema.dimension_),
                                                               total_time);

    return status;
}

size_t VectorSource::GetNumVectorsAdded() {
    return current_num_vectors_added;
}

bool VectorSource::AllAdded() {
    return (current_num_vectors_added == n_);
}

IDNumbers VectorSource::GetVectorIds() {
    return vector_ids_;
}

} // namespace engine
} // namespace milvus
} // namespace zilliz
46
cpp/src/db/VectorSource.h
Normal file
@ -0,0 +1,46 @@
#pragma once

#include "Status.h"
#include "Meta.h"
#include "IDGenerator.h"
#include "ExecutionEngine.h"


namespace zilliz {
namespace milvus {
namespace engine {

class VectorSource {

public:

    using Ptr = std::shared_ptr<VectorSource>;

    VectorSource(const size_t &n, const float *vectors);

    Status Add(const ExecutionEnginePtr &execution_engine,
               const meta::TableFileSchema &table_file_schema,
               const size_t &num_vectors_to_add,
               size_t &num_vectors_added);

    size_t GetNumVectorsAdded();

    bool AllAdded();

    IDNumbers GetVectorIds();

private:

    const size_t n_;
    const float *vectors_;
    IDNumbers vector_ids_;

    size_t current_num_vectors_added;

    IDGenerator *id_generator_;

}; // VectorSource

} // namespace engine
} // namespace milvus
} // namespace zilliz
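From the interface above, the intended calling pattern is a loop that feeds batches into one execution engine until the source is drained. A hedged sketch of such a driver follows; DrainSource is a hypothetical helper, and the engine and schema objects are assumed to be prepared by the caller:

#include "VectorSource.h"  // project header introduced by this commit

using namespace zilliz::milvus::engine;

// Hypothetical driver: push a VectorSource into an engine in fixed-size batches.
Status DrainSource(VectorSource::Ptr source,
                   ExecutionEnginePtr engine,
                   const meta::TableFileSchema& file_schema,
                   size_t batch_size) {
    while (!source->AllAdded()) {
        size_t added = 0;
        Status status = source->Add(engine, file_schema, batch_size, added);
        if (!status.ok()) {
            return status;  // Add() already logged the failure
        }
        // 'added' can be smaller than batch_size on the last round,
        // because Add() clamps to the remaining vector count
    }
    return Status::OK();
}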
@ -30,11 +30,20 @@ void CollectDurationMetrics(int index_type, double total_time) {
    }
}

std::string GetMetricType() {
    server::ServerConfig &config = server::ServerConfig::GetInstance();
    server::ConfigNode engine_config = config.GetConfig(server::CONFIG_ENGINE);
    return engine_config.GetValue(server::CONFIG_METRICTYPE, "L2");
}

}

SearchTask::SearchTask()
    : IScheduleTask(ScheduleTaskType::kSearch) {

    std::string metric_type = GetMetricType();
    if(metric_type != "L2") {
        metric_l2 = false;
    }
}

std::shared_ptr<IScheduleTask> SearchTask::Execute() {
@ -71,7 +80,7 @@ std::shared_ptr<IScheduleTask> SearchTask::Execute() {
        rc.Record("cluster result");

        //step 4: pick up topk result
        SearchTask::TopkResult(result_set, inner_k, context->GetResult());
        SearchTask::TopkResult(result_set, inner_k, metric_l2, context->GetResult());
        rc.Record("reduce topk");

    } catch (std::exception& ex) {
@ -125,7 +134,8 @@ Status SearchTask::ClusterResult(const std::vector<long> &output_ids,

Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,
                               SearchContext::Id2DistanceMap &distance_target,
                               uint64_t topk) {
                               uint64_t topk,
                               bool ascending) {
    //Note: the score_src and score_target are already arranged by score in ascending order
    if(distance_src.empty()) {
        SERVER_LOG_WARNING << "Empty distance source array";
@ -161,15 +171,27 @@ Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,
            break;
        }

        //compare score, put smallest score to score_merged one by one
        //compare scores:
        // if ascending = true, put the smallest score into score_merged one by one
        // else, put the largest score into score_merged one by one
        auto& src_pair = distance_src[src_index];
        auto& target_pair = distance_target[target_index];
        if(src_pair.second > target_pair.second) {
            distance_merged.push_back(target_pair);
            target_index++;
        if(ascending) {
            if(src_pair.second > target_pair.second) {
                distance_merged.push_back(target_pair);
                target_index++;
            } else {
                distance_merged.push_back(src_pair);
                src_index++;
            }
        } else {
            distance_merged.push_back(src_pair);
            src_index++;
            if(src_pair.second < target_pair.second) {
                distance_merged.push_back(target_pair);
                target_index++;
            } else {
                distance_merged.push_back(src_pair);
                src_index++;
            }
        }

        //score_merged.size() already equal topk
@ -185,6 +207,7 @@ Status SearchTask::MergeResult(SearchContext::Id2DistanceMap &distance_src,

Status SearchTask::TopkResult(SearchContext::ResultSet &result_src,
                              uint64_t topk,
                              bool ascending,
                              SearchContext::ResultSet &result_target) {
    if (result_target.empty()) {
        result_target.swap(result_src);
@ -200,7 +223,7 @@ Status SearchTask::TopkResult(SearchContext::ResultSet &result_src,
    for (size_t i = 0; i < result_src.size(); i++) {
        SearchContext::Id2DistanceMap &score_src = result_src[i];
        SearchContext::Id2DistanceMap &score_target = result_target[i];
        SearchTask::MergeResult(score_src, score_target, topk);
        SearchTask::MergeResult(score_src, score_target, topk, ascending);
    }

    return Status::OK();

@ -27,10 +27,12 @@ public:

    static Status MergeResult(SearchContext::Id2DistanceMap &distance_src,
                              SearchContext::Id2DistanceMap &distance_target,
                              uint64_t topk);
                              uint64_t topk,
                              bool ascending);

    static Status TopkResult(SearchContext::ResultSet &result_src,
                             uint64_t topk,
                             bool ascending,
                             SearchContext::ResultSet &result_target);

public:
@ -38,6 +40,7 @@ public:
    int index_type_ = 0; //for metrics
    ExecutionEnginePtr index_engine_;
    std::vector<SearchContextPtr> search_contexts_;
    bool metric_l2 = true;
};

using SearchTaskPtr = std::shared_ptr<SearchTask>;

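The new ascending flag exists because L2 distance ranks best-first in ascending order, while a similarity metric such as inner product ranks best-first in descending order; the two branches in MergeResult are the classic two-pointer merge with the comparison flipped. A self-contained sketch of the same idea, with the pair type simplified to id/score (MergeTopk is an illustrative stand-in, not the project's function):

#include <cstdint>
#include <utility>
#include <vector>

using IdScore = std::pair<int64_t, double>;

// Two-pointer top-k merge of two runs already sorted best-first;
// 'ascending' flips the comparison, matching the metric_l2 distinction above.
std::vector<IdScore> MergeTopk(const std::vector<IdScore>& a,
                               const std::vector<IdScore>& b,
                               size_t topk, bool ascending) {
    std::vector<IdScore> merged;
    size_t i = 0, j = 0;
    while (merged.size() < topk && (i < a.size() || j < b.size())) {
        bool take_a;
        if (i >= a.size())        take_a = false;  // a exhausted
        else if (j >= b.size())   take_a = true;   // b exhausted
        else if (ascending)       take_a = (a[i].second <= b[j].second);
        else                      take_a = (a[i].second >= b[j].second);
        merged.push_back(take_a ? a[i++] : b[j++]);
    }
    return merged;
}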
@ -8,6 +8,7 @@

#include <iostream>
#include <time.h>
#include <chrono>
#include <unistd.h>

using namespace ::milvus;
@ -21,7 +22,7 @@ namespace {
    static constexpr int64_t NQ = 10;
    static constexpr int64_t TOP_K = 10;
    static constexpr int64_t SEARCH_TARGET = 5000; //change this value, result is different
    static constexpr int64_t ADD_VECTOR_LOOP = 5;
    static constexpr int64_t ADD_VECTOR_LOOP = 10;

#define BLOCK_SPLITER std::cout << "===========================================" << std::endl;

@ -34,26 +35,17 @@ namespace {
        BLOCK_SPLITER
    }

    void PrintRecordIdArray(const std::vector<int64_t>& record_ids) {
        BLOCK_SPLITER
        std::cout << "Returned id array count: " << record_ids.size() << std::endl;
#if 0
        for(auto id : record_ids) {
            std::cout << std::to_string(id) << std::endl;
        }
#endif
        BLOCK_SPLITER
    }

    void PrintSearchResult(const std::vector<TopKQueryResult>& topk_query_result_array) {
    void PrintSearchResult(const std::vector<std::pair<int64_t, RowRecord>>& search_record_array,
                           const std::vector<TopKQueryResult>& topk_query_result_array) {
        BLOCK_SPLITER
        std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl;

        int32_t index = 0;
        for(auto& result : topk_query_result_array) {
            auto search_id = search_record_array[index].first;
            index++;
            std::cout << "No." << std::to_string(index) << " vector top "
                      << std::to_string(result.query_result_arrays.size())
            std::cout << "No." << std::to_string(index) << " vector " << std::to_string(search_id)
                      << " top " << std::to_string(result.query_result_arrays.size())
                      << " search result:" << std::endl;
            for(auto& item : result.query_result_arrays) {
                std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance);
@ -91,13 +83,13 @@ namespace {

    std::string GetTableName() {
        static std::string s_id(CurrentTime());
        return s_id;
        return "tbl_" + s_id;
    }

    TableSchema BuildTableSchema() {
        TableSchema tb_schema;
        tb_schema.table_name = TABLE_NAME;
        tb_schema.index_type = IndexType::cpu_idmap;
        tb_schema.index_type = IndexType::gpu_ivfflat;
        tb_schema.dimension = TABLE_DIMENSION;
        tb_schema.store_raw_vector = true;

@ -126,6 +118,66 @@ namespace {
        std::cout << "Waiting " << seconds << " seconds ..." << std::endl;
        sleep(seconds);
    }

    class TimeRecorder {
    public:
        TimeRecorder(const std::string& title)
            : title_(title) {
            start_ = std::chrono::system_clock::now();
        }

        ~TimeRecorder() {
            std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
            long span = (std::chrono::duration_cast<std::chrono::milliseconds>(end - start_)).count();
            std::cout << title_ << " totally cost: " << span << " ms" << std::endl;
        }

    private:
        std::string title_;
        std::chrono::system_clock::time_point start_;
    };

    void CheckResult(const std::vector<std::pair<int64_t, RowRecord>>& search_record_array,
                     const std::vector<TopKQueryResult>& topk_query_result_array) {
        BLOCK_SPLITER
        int64_t index = 0;
        for(auto& result : topk_query_result_array) {
            auto result_id = result.query_result_arrays[0].id;
            auto search_id = search_record_array[index++].first;
            if(result_id != search_id) {
                std::cout << "The top 1 result is wrong: " << result_id
                          << " vs. " << search_id << std::endl;
            } else {
                std::cout << "Check result successfully" << std::endl;
            }
        }
        BLOCK_SPLITER
    }

    void DoSearch(std::shared_ptr<Connection> conn,
                  const std::vector<std::pair<int64_t, RowRecord>>& search_record_array,
                  const std::string& phase_name) {
        std::vector<Range> query_range_array;
        Range rg;
        rg.start_value = CurrentTmDate();
        rg.end_value = CurrentTmDate();
        query_range_array.emplace_back(rg);

        std::vector<RowRecord> record_array;
        for(auto& pair : search_record_array) {
            record_array.push_back(pair.second);
        }

        std::vector<TopKQueryResult> topk_query_result_array;
        {
            TimeRecorder rc(phase_name);
            Status stat = conn->SearchVector(TABLE_NAME, record_array, query_range_array, TOP_K, topk_query_result_array);
            std::cout << "SearchVector function call status: " << stat.ToString() << std::endl;
        }

        PrintSearchResult(search_record_array, topk_query_result_array);
        CheckResult(search_record_array, topk_query_result_array);
    }
}

void
@ -179,30 +231,37 @@ ClientTest::Test(const std::string& address, const std::string& port) {
        PrintTableSchema(tb_schema);
    }

    for(int i = 0; i < ADD_VECTOR_LOOP; i++){//add vectors
        std::vector<RowRecord> record_array;
        BuildVectors(i*BATCH_ROW_COUNT, (i+1)*BATCH_ROW_COUNT, record_array);
        std::vector<int64_t> record_ids;
        Status stat = conn->AddVector(TABLE_NAME, record_array, record_ids);
        std::cout << "AddVector function call status: " << stat.ToString() << std::endl;
        PrintRecordIdArray(record_ids);
    std::vector<std::pair<int64_t, RowRecord>> search_record_array;
    {//add vectors
        for (int i = 0; i < ADD_VECTOR_LOOP; i++) {
            std::vector<RowRecord> record_array;
            int64_t begin_index = i * BATCH_ROW_COUNT;
            BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array);
            std::vector<int64_t> record_ids;
            Status stat = conn->AddVector(TABLE_NAME, record_array, record_ids);
            std::cout << "AddVector function call status: " << stat.ToString() << std::endl;
            std::cout << "Returned id array count: " << record_ids.size() << std::endl;

            if(search_record_array.size() < NQ) {
                search_record_array.push_back(
                    std::make_pair(record_ids[SEARCH_TARGET], record_array[SEARCH_TARGET]));
            }
        }
    }

    {//search vectors
    {//search vectors without index
        Sleep(2);
        DoSearch(conn, search_record_array, "Search without index");
    }

    std::vector<RowRecord> record_array;
    BuildVectors(SEARCH_TARGET, SEARCH_TARGET + NQ, record_array);
    {//wait until build index finish
        std::cout << "Wait until build all index done" << std::endl;
        Status stat = conn->BuildIndex(TABLE_NAME);
        std::cout << "BuildIndex function call status: " << stat.ToString() << std::endl;
    }

    std::vector<Range> query_range_array;
    Range rg;
    rg.start_value = CurrentTmDate();
    rg.end_value = CurrentTmDate();
    query_range_array.emplace_back(rg);
    std::vector<TopKQueryResult> topk_query_result_array;
    Status stat = conn->SearchVector(TABLE_NAME, record_array, query_range_array, TOP_K, topk_query_result_array);
    std::cout << "SearchVector function call status: " << stat.ToString() << std::endl;
    PrintSearchResult(topk_query_result_array);
    {//search vectors after build index finish
        DoSearch(conn, search_record_array, "Search after build index finish");
    }

    {//delete table

@ -18,6 +18,7 @@ enum class IndexType {
    invalid = 0,
    cpu_idmap,
    gpu_ivfflat,
    gpu_ivfsq8,
};

/**
@ -180,6 +181,17 @@ public:
    virtual Status DeleteTable(const std::string &table_name) = 0;


    /**
     * @brief Build index method
     *
     * This method is used to build the index for a whole table.
     *
     * @param table_name, the table whose index is going to be built.
     *
     * @return Indicate whether the index is built successfully.
     */
    virtual Status BuildIndex(const std::string &table_name) = 0;

    /**
     * @brief Add vector to table
     *

@ -126,6 +126,22 @@ ClientProxy::DeleteTable(const std::string &table_name) {
    return Status::OK();
}

Status
ClientProxy::BuildIndex(const std::string &table_name) {
    if(!IsConnected()) {
        return Status(StatusCode::NotConnected, "not connected to server");
    }

    try {
        ClientPtr()->interface()->BuildIndex(table_name);

    } catch (std::exception& ex) {
        return Status(StatusCode::UnknownError, "failed to build index: " + std::string(ex.what()));
    }

    return Status::OK();
}

Status
ClientProxy::AddVector(const std::string &table_name,
                       const std::vector<RowRecord> &record_array,

@ -27,6 +27,8 @@ public:

    virtual Status DeleteTable(const std::string &table_name) override;

    virtual Status BuildIndex(const std::string &table_name) override;

    virtual Status AddVector(const std::string &table_name,
                             const std::vector<RowRecord> &record_array,
                             std::vector<int64_t> &id_array) override;

@ -66,6 +66,11 @@ ConnectionImpl::DeleteTable(const std::string &table_name) {
    return client_proxy_->DeleteTable(table_name);
}

Status
ConnectionImpl::BuildIndex(const std::string &table_name) {
    return client_proxy_->BuildIndex(table_name);
}

Status
ConnectionImpl::AddVector(const std::string &table_name,
                          const std::vector<RowRecord> &record_array,

@ -29,6 +29,8 @@ public:

    virtual Status DeleteTable(const std::string &table_name) override;

    virtual Status BuildIndex(const std::string &table_name) override;

    virtual Status AddVector(const std::string &table_name,
                             const std::vector<RowRecord> &record_array,
                             std::vector<int64_t> &id_array) override;

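With the SDK plumbing above, a caller reaches BuildIndex through the same delegation chain as every other call: ConnectionImpl forwards to ClientProxy, which drives the Thrift client. A hedged usage sketch, assuming the Connection::Create/Connect/Disconnect API shown elsewhere in this SDK; the address and port are placeholders:

#include "MilvusApi.h"  // SDK header, assumed to declare Connection and ConnectParam

#include <iostream>
#include <memory>
#include <string>

using namespace milvus;

// Hypothetical snippet: connect, trigger a synchronous index build, report status.
void BuildTableIndex(const std::string& table_name) {
    std::shared_ptr<Connection> conn = Connection::Create();
    ConnectParam param = {"127.0.0.1", "19530"};  // placeholder address/port
    conn->Connect(param);

    Status stat = conn->BuildIndex(table_name);   // blocks until the build finishes
    std::cout << "BuildIndex status: " << stat.ToString() << std::endl;

    conn->Disconnect();
}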
@ -8,6 +8,7 @@
#include "ServerConfig.h"
#include "utils/CommonUtil.h"
#include "utils/Log.h"
#include "utils/StringHelpFunctions.h"

namespace zilliz {
namespace milvus {
@ -19,10 +20,22 @@ DBWrapper::DBWrapper() {
    opt.meta.backend_uri = config.GetValue(CONFIG_DB_URL);
    std::string db_path = config.GetValue(CONFIG_DB_PATH);
    opt.meta.path = db_path + "/db";

    std::string db_slave_path = config.GetValue(CONFIG_DB_SLAVE_PATH);
    StringHelpFunctions::SplitStringByDelimeter(db_slave_path, ";", opt.meta.slave_paths);

    int64_t index_size = config.GetInt64Value(CONFIG_DB_INDEX_TRIGGER_SIZE);
    if(index_size > 0) {//ensure larger than zero, unit is MB
        opt.index_trigger_size = (size_t)index_size * engine::ONE_MB;
    }
    float maximum_memory = config.GetFloatValue(CONFIG_MAXMIMUM_MEMORY);
    if (maximum_memory > 1.0) {
        opt.maximum_memory = maximum_memory * engine::ONE_GB;
    } else {
        std::cout << "ERROR: maximum_memory should be at least 1 GB" << std::endl;
        kill(0, SIGUSR1);
    }

    ConfigNode& serverConfig = ServerConfig::GetInstance().GetConfig(CONFIG_SERVER);
    std::string mode = serverConfig.GetValue(CONFIG_CLUSTER_MODE, "single");
@ -59,6 +72,14 @@ DBWrapper::DBWrapper() {
        kill(0, SIGUSR1);
    }

    for(auto& path : opt.meta.slave_paths) {
        err = CommonUtil::CreateDirectory(path);
        if(err != SERVER_SUCCESS) {
            std::cout << "ERROR! Failed to create database slave path: " << path << std::endl;
            kill(0, SIGUSR1);
        }
    }

    std::string msg = opt.meta.path;
    try {
        zilliz::milvus::engine::DB::Open(opt, &db_);

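The new db_slave_path option is a single semicolon-delimited string that SplitStringByDelimeter expands into opt.meta.slave_paths. A minimal standalone equivalent of that split, for reference; the SplitPaths helper is illustrative, not the project's API:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Illustrative stand-in for StringHelpFunctions::SplitStringByDelimeter.
std::vector<std::string> SplitPaths(const std::string& value, char delimiter = ';') {
    std::vector<std::string> paths;
    std::stringstream ss(value);
    std::string item;
    while (std::getline(ss, item, delimiter)) {
        if (!item.empty()) {
            paths.push_back(item);  // skip empty segments from stray delimiters
        }
    }
    return paths;
}

int main() {
    // e.g. db_slave_path: /mnt/disk1/db;/mnt/disk2/db
    for (const auto& p : SplitPaths("/mnt/disk1/db;/mnt/disk2/db")) {
        std::cout << p << "\n";
    }
}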
@ -8,9 +8,11 @@
#include "ServerConfig.h"
#include "ThreadPoolServer.h"
#include "DBWrapper.h"
#include "utils/Log.h"

#include "milvus_types.h"
#include "milvus_constants.h"
#include "faiss/utils.h"

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/protocol/TJSONProtocol.h>
@ -25,6 +27,8 @@
#include <thread>
#include <iostream>

//extern int distance_compute_blas_threshold;

namespace zilliz {
namespace milvus {
namespace server {
@ -46,11 +50,12 @@ MilvusServer::StartService() {

    ServerConfig &config = ServerConfig::GetInstance();
    ConfigNode server_config = config.GetConfig(CONFIG_SERVER);

    ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
    std::string address = server_config.GetValue(CONFIG_SERVER_ADDRESS, "127.0.0.1");
    int32_t port = server_config.GetInt32Value(CONFIG_SERVER_PORT, 19530);
    std::string protocol = server_config.GetValue(CONFIG_SERVER_PROTOCOL, "binary");

    faiss::distance_compute_blas_threshold = engine_config.GetInt32Value(CONFIG_DCBT, 20);
    // std::cout << "distance_compute_blas_threshold = " << faiss::distance_compute_blas_threshold << std::endl;
    try {
        DBWrapper::DB();//initialize db

@ -39,6 +39,12 @@ RequestHandler::DeleteTable(const std::string &table_name) {
    RequestScheduler::ExecTask(task_ptr);
}

void
RequestHandler::BuildIndex(const std::string &table_name) {
    BaseTaskPtr task_ptr = BuildIndexTask::Create(table_name);
    RequestScheduler::ExecTask(task_ptr);
}

void
RequestHandler::AddVector(std::vector<int64_t> &_return,
                          const std::string &table_name,

@ -54,6 +54,18 @@ public:
     */
    void DeleteTable(const std::string& table_name);

    /**
     * @brief Build index by table method
     *
     * This method is used to build the index for a table synchronously.
     *
     * @param table_name, the table whose index is going to be built.
     */
    void BuildIndex(const std::string &table_name);

    /**
     * @brief Add vector array to table
     *

@ -12,7 +12,7 @@
namespace zilliz {
namespace milvus {
namespace server {

using namespace ::milvus;

namespace {
@ -43,6 +43,7 @@ namespace {
        {SERVER_ILLEGAL_SEARCH_RESULT, thrift::ErrorCode::ILLEGAL_SEARCH_RESULT},
        {SERVER_CACHE_ERROR, thrift::ErrorCode::CACHE_FAILED},
        {DB_META_TRANSACTION_FAILED, thrift::ErrorCode::META_FAILED},
        {SERVER_BUILD_INDEX_ERROR, thrift::ErrorCode::BUILD_INDEX_ERROR},
    };

    return code_map;

|
||||
#include "utils/CommonUtil.h"
|
||||
#include "utils/Log.h"
|
||||
#include "utils/TimeRecorder.h"
|
||||
#include "utils/ValidationUtil.h"
|
||||
#include "DBWrapper.h"
|
||||
#include "version.h"
|
||||
|
||||
@ -30,6 +31,7 @@ namespace {
|
||||
{0, engine::EngineType::INVALID},
|
||||
{1, engine::EngineType::FAISS_IDMAP},
|
||||
{2, engine::EngineType::FAISS_IVFFLAT},
|
||||
{3, engine::EngineType::FAISS_IVFSQ8},
|
||||
};
|
||||
|
||||
if(map_type.find(type) == map_type.end()) {
|
||||
@ -44,6 +46,7 @@ namespace {
|
||||
{engine::EngineType::INVALID, 0},
|
||||
{engine::EngineType::FAISS_IDMAP, 1},
|
||||
{engine::EngineType::FAISS_IVFFLAT, 2},
|
||||
{engine::EngineType::FAISS_IVFSQ8, 3},
|
||||
};
|
||||
|
||||
if(map_type.find(type) == map_type.end()) {
|
||||
@ -108,7 +111,13 @@ namespace {
|
||||
}
|
||||
|
||||
long days = (tt_end > tt_start) ? (tt_end - tt_start)/DAY_SECONDS : (tt_start - tt_end)/DAY_SECONDS;
|
||||
for(long i = 0; i <= days; i++) {
|
||||
if(days == 0) {
|
||||
error_code = SERVER_INVALID_TIME_RANGE;
|
||||
error_msg = "Invalid time range: " + range.start_value + " to " + range.end_value;
|
||||
return ;
|
||||
}
|
||||
|
||||
for(long i = 0; i < days; i++) {
|
||||
time_t tt_day = tt_start + DAY_SECONDS*i;
|
||||
tm tm_day;
|
||||
CommonUtil::ConvertTime(tt_day, tm_day);
|
||||
@ -133,19 +142,23 @@ BaseTaskPtr CreateTableTask::Create(const thrift::TableSchema& schema) {
|
||||
|
||||
ServerError CreateTableTask::OnExecute() {
|
||||
TimeRecorder rc("CreateTableTask");
|
||||
|
||||
|
||||
try {
|
||||
//step 1: check arguments
|
||||
if(schema_.table_name.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
}
|
||||
if(schema_.dimension <= 0) {
|
||||
return SetError(SERVER_INVALID_TABLE_DIMENSION, "Invalid table dimension: " + std::to_string(schema_.dimension));
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(schema_.table_name);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
engine::EngineType engine_type = EngineType(schema_.index_type);
|
||||
if(engine_type == engine::EngineType::INVALID) {
|
||||
return SetError(SERVER_INVALID_INDEX_TYPE, "Invalid index type: " + std::to_string(schema_.index_type));
|
||||
res = ValidateTableDimension(schema_.dimension);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
res = ValidateTableIndexType(schema_.index_type);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
//step 2: construct table schema
|
||||
@ -187,8 +200,10 @@ ServerError DescribeTableTask::OnExecute() {
|
||||
|
||||
try {
|
||||
//step 1: check arguments
|
||||
if(table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
//step 2: get table info
|
||||
@ -213,6 +228,39 @@ ServerError DescribeTableTask::OnExecute() {
|
||||
return SERVER_SUCCESS;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
BuildIndexTask::BuildIndexTask(const std::string& table_name)
|
||||
: BaseTask(DDL_DML_TASK_GROUP),
|
||||
table_name_(table_name) {
|
||||
}
|
||||
|
||||
BaseTaskPtr BuildIndexTask::Create(const std::string& table_name) {
|
||||
return std::shared_ptr<BaseTask>(new BuildIndexTask(table_name));
|
||||
}
|
||||
|
||||
ServerError BuildIndexTask::OnExecute() {
|
||||
try {
|
||||
TimeRecorder rc("BuildIndexTask");
|
||||
|
||||
//step 1: check arguments
|
||||
if(table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
}
|
||||
|
||||
//step 2: check table existence
|
||||
engine::Status stat = DBWrapper::DB()->BuildIndex(table_name_);
|
||||
if(!stat.ok()) {
|
||||
return SetError(SERVER_BUILD_INDEX_ERROR, "Engine failed: " + stat.ToString());
|
||||
}
|
||||
|
||||
rc.Elapse("totally cost");
|
||||
} catch (std::exception& ex) {
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
}
|
||||
|
||||
return SERVER_SUCCESS;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
HasTableTask::HasTableTask(const std::string& table_name, bool& has_table)
|
||||
: BaseTask(DDL_DML_TASK_GROUP),
|
||||
@ -230,10 +278,11 @@ ServerError HasTableTask::OnExecute() {
|
||||
TimeRecorder rc("HasTableTask");
|
||||
|
||||
//step 1: check arguments
|
||||
if(table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
//step 2: check table existence
|
||||
engine::Status stat = DBWrapper::DB()->HasTable(table_name_, has_table_);
|
||||
if(!stat.ok()) {
|
||||
@ -264,8 +313,10 @@ ServerError DeleteTableTask::OnExecute() {
|
||||
TimeRecorder rc("DeleteTableTask");
|
||||
|
||||
//step 1: check arguments
|
||||
if (table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
//step 2: check table existence
|
||||
@ -290,7 +341,7 @@ ServerError DeleteTableTask::OnExecute() {
|
||||
}
|
||||
|
||||
rc.Record("deleta table");
|
||||
rc.Elapse("totally cost");
|
||||
rc.Elapse("total cost");
|
||||
} catch (std::exception& ex) {
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
}
|
||||
@ -346,8 +397,10 @@ ServerError AddVectorTask::OnExecute() {
|
||||
TimeRecorder rc("AddVectorTask");
|
||||
|
||||
//step 1: check arguments
|
||||
if (table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
if(record_array_.empty()) {
|
||||
@ -394,7 +447,7 @@ ServerError AddVectorTask::OnExecute() {
|
||||
}
|
||||
|
||||
rc.Record("do insert");
|
||||
rc.Elapse("totally cost");
|
||||
rc.Elapse("total cost");
|
||||
|
||||
} catch (std::exception& ex) {
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
@ -435,8 +488,10 @@ ServerError SearchVectorTask::OnExecute() {
|
||||
TimeRecorder rc("SearchVectorTask");
|
||||
|
||||
//step 1: check arguments
|
||||
if (table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
if(top_k_ <= 0) {
|
||||
@ -522,7 +577,7 @@ ServerError SearchVectorTask::OnExecute() {
|
||||
result_array_.emplace_back(thrift_topk_result);
|
||||
}
|
||||
rc.Record("construct result");
|
||||
rc.Elapse("totally cost");
|
||||
rc.Elapse("total cost");
|
||||
|
||||
} catch (std::exception& ex) {
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
@ -548,8 +603,10 @@ ServerError GetTableRowCountTask::OnExecute() {
|
||||
TimeRecorder rc("GetTableRowCountTask");
|
||||
|
||||
//step 1: check arguments
|
||||
if (table_name_.empty()) {
|
||||
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
|
||||
ServerError res = SERVER_SUCCESS;
|
||||
res = ValidateTableName(table_name_);
|
||||
if(res != SERVER_SUCCESS) {
|
||||
return res;
|
||||
}
|
||||
|
||||
//step 2: get row count
|
||||
@ -561,7 +618,7 @@ ServerError GetTableRowCountTask::OnExecute() {
|
||||
|
||||
row_count_ = (int64_t) row_count;
|
||||
|
||||
rc.Elapse("totally cost");
|
||||
rc.Elapse("total cost");
|
||||
|
||||
} catch (std::exception& ex) {
|
||||
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
|
||||
|
||||
@ -75,6 +75,21 @@ protected:
|
||||
ServerError OnExecute() override;
|
||||
|
||||
|
||||
private:
|
||||
std::string table_name_;
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
class BuildIndexTask : public BaseTask {
|
||||
public:
|
||||
static BaseTaskPtr Create(const std::string& table_name);
|
||||
|
||||
protected:
|
||||
BuildIndexTask(const std::string& table_name);
|
||||
|
||||
ServerError OnExecute() override;
|
||||
|
||||
|
||||
private:
|
||||
std::string table_name_;
|
||||
};
|
||||
@ -174,4 +189,4 @@ private:
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
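The recurring refactor in these task handlers replaces ad-hoc empty-string checks with shared ValidateTableName / ValidateTableDimension / ValidateTableIndexType helpers, so every task rejects bad arguments the same way. A sketch of what such a name validator might look like; the rules and error code shown are assumptions for illustration, not the exact contents of ValidationUtil:

#include <cctype>
#include <string>

using ServerError = int;
constexpr ServerError SERVER_SUCCESS = 0;
constexpr ServerError SERVER_INVALID_TABLE_NAME = 1;  // placeholder code

// Hypothetical validator: non-empty, starts with a letter or underscore,
// remaining characters alphanumeric or underscore.
ServerError ValidateTableNameSketch(const std::string& name) {
    if (name.empty()) {
        return SERVER_INVALID_TABLE_NAME;
    }
    if (!std::isalpha(static_cast<unsigned char>(name[0])) && name[0] != '_') {
        return SERVER_INVALID_TABLE_NAME;
    }
    for (char c : name) {
        if (!std::isalnum(static_cast<unsigned char>(c)) && c != '_') {
            return SERVER_INVALID_TABLE_NAME;
        }
    }
    return SERVER_SUCCESS;
}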
@ -23,9 +23,11 @@ static const std::string CONFIG_CLUSTER_MODE = "mode";
static const std::string CONFIG_DB = "db_config";
static const std::string CONFIG_DB_URL = "db_backend_url";
static const std::string CONFIG_DB_PATH = "db_path";
static const std::string CONFIG_DB_SLAVE_PATH = "db_slave_path";
static const std::string CONFIG_DB_INDEX_TRIGGER_SIZE = "index_building_threshold";
static const std::string CONFIG_DB_ARCHIVE_DISK = "archive_disk_threshold";
static const std::string CONFIG_DB_ARCHIVE_DAYS = "archive_days_threshold";
static const std::string CONFIG_MAXMIMUM_MEMORY = "maximum_memory";

static const std::string CONFIG_LOG = "log_config";

@ -44,6 +46,9 @@ static const std::string CONFIG_METRIC_PROMETHEUS_PORT = "port";

static const std::string CONFIG_ENGINE = "engine_config";
static const std::string CONFIG_NPROBE = "nprobe";
static const std::string CONFIG_NLIST = "nlist";
static const std::string CONFIG_DCBT = "use_blas_threshold";
static const std::string CONFIG_METRICTYPE = "metric_type";

class ServerConfig {
public:

@ -590,6 +590,193 @@ uint32_t MilvusService_DeleteTable_presult::read(::apache::thrift::protocol::TPr
}


MilvusService_BuildIndex_args::~MilvusService_BuildIndex_args() throw() {
}


uint32_t MilvusService_BuildIndex_args::read(::apache::thrift::protocol::TProtocol* iprot) {

  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;

  xfer += iprot->readStructBegin(fname);

  using ::apache::thrift::protocol::TProtocolException;


  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->table_name);
          this->__isset.table_name = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }

  xfer += iprot->readStructEnd();

  return xfer;
}

uint32_t MilvusService_BuildIndex_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
  xfer += oprot->writeStructBegin("MilvusService_BuildIndex_args");

  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->table_name);
  xfer += oprot->writeFieldEnd();

  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}


MilvusService_BuildIndex_pargs::~MilvusService_BuildIndex_pargs() throw() {
}


uint32_t MilvusService_BuildIndex_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  ::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
  xfer += oprot->writeStructBegin("MilvusService_BuildIndex_pargs");

  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString((*(this->table_name)));
  xfer += oprot->writeFieldEnd();

  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}


MilvusService_BuildIndex_result::~MilvusService_BuildIndex_result() throw() {
}


uint32_t MilvusService_BuildIndex_result::read(::apache::thrift::protocol::TProtocol* iprot) {

  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;

  xfer += iprot->readStructBegin(fname);

  using ::apache::thrift::protocol::TProtocolException;


  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->e.read(iprot);
          this->__isset.e = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }

  xfer += iprot->readStructEnd();

  return xfer;
}

uint32_t MilvusService_BuildIndex_result::write(::apache::thrift::protocol::TProtocol* oprot) const {

  uint32_t xfer = 0;

  xfer += oprot->writeStructBegin("MilvusService_BuildIndex_result");

  if (this->__isset.e) {
    xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
    xfer += this->e.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}


MilvusService_BuildIndex_presult::~MilvusService_BuildIndex_presult() throw() {
}


uint32_t MilvusService_BuildIndex_presult::read(::apache::thrift::protocol::TProtocol* iprot) {

  ::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;

  xfer += iprot->readStructBegin(fname);

  using ::apache::thrift::protocol::TProtocolException;


  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->e.read(iprot);
          this->__isset.e = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }

  xfer += iprot->readStructEnd();

  return xfer;
}


MilvusService_AddVector_args::~MilvusService_AddVector_args() throw() {
}

@ -2614,6 +2801,62 @@ void MilvusServiceClient::recv_DeleteTable()
  return;
}

void MilvusServiceClient::BuildIndex(const std::string& table_name)
{
  send_BuildIndex(table_name);
  recv_BuildIndex();
}

void MilvusServiceClient::send_BuildIndex(const std::string& table_name)
{
  int32_t cseqid = 0;
  oprot_->writeMessageBegin("BuildIndex", ::apache::thrift::protocol::T_CALL, cseqid);

  MilvusService_BuildIndex_pargs args;
  args.table_name = &table_name;
  args.write(oprot_);

  oprot_->writeMessageEnd();
  oprot_->getTransport()->writeEnd();
  oprot_->getTransport()->flush();
}

void MilvusServiceClient::recv_BuildIndex()
{

  int32_t rseqid = 0;
  std::string fname;
  ::apache::thrift::protocol::TMessageType mtype;

  iprot_->readMessageBegin(fname, mtype, rseqid);
  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
    ::apache::thrift::TApplicationException x;
    x.read(iprot_);
    iprot_->readMessageEnd();
    iprot_->getTransport()->readEnd();
    throw x;
  }
  if (mtype != ::apache::thrift::protocol::T_REPLY) {
    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
    iprot_->readMessageEnd();
    iprot_->getTransport()->readEnd();
  }
  if (fname.compare("BuildIndex") != 0) {
    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
    iprot_->readMessageEnd();
    iprot_->getTransport()->readEnd();
  }
  MilvusService_BuildIndex_presult result;
  result.read(iprot_);
  iprot_->readMessageEnd();
  iprot_->getTransport()->readEnd();

  if (result.__isset.e) {
    throw result.e;
  }
  return;
}

void MilvusServiceClient::AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array)
{
  send_AddVector(table_name, record_array);
@ -3236,6 +3479,62 @@ void MilvusServiceProcessor::process_DeleteTable(int32_t seqid, ::apache::thrift
  }
}

void MilvusServiceProcessor::process_BuildIndex(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
{
  void* ctx = NULL;
  if (this->eventHandler_.get() != NULL) {
    ctx = this->eventHandler_->getContext("MilvusService.BuildIndex", callContext);
  }
  ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "MilvusService.BuildIndex");

  if (this->eventHandler_.get() != NULL) {
    this->eventHandler_->preRead(ctx, "MilvusService.BuildIndex");
  }

  MilvusService_BuildIndex_args args;
  args.read(iprot);
  iprot->readMessageEnd();
  uint32_t bytes = iprot->getTransport()->readEnd();

  if (this->eventHandler_.get() != NULL) {
    this->eventHandler_->postRead(ctx, "MilvusService.BuildIndex", bytes);
  }

  MilvusService_BuildIndex_result result;
  try {
    iface_->BuildIndex(args.table_name);
  } catch (Exception &e) {
    result.e = e;
    result.__isset.e = true;
  } catch (const std::exception& e) {
    if (this->eventHandler_.get() != NULL) {
      this->eventHandler_->handlerError(ctx, "MilvusService.BuildIndex");
    }

    ::apache::thrift::TApplicationException x(e.what());
    oprot->writeMessageBegin("BuildIndex", ::apache::thrift::protocol::T_EXCEPTION, seqid);
    x.write(oprot);
    oprot->writeMessageEnd();
    oprot->getTransport()->writeEnd();
    oprot->getTransport()->flush();
    return;
  }

  if (this->eventHandler_.get() != NULL) {
    this->eventHandler_->preWrite(ctx, "MilvusService.BuildIndex");
  }

  oprot->writeMessageBegin("BuildIndex", ::apache::thrift::protocol::T_REPLY, seqid);
  result.write(oprot);
  oprot->writeMessageEnd();
  bytes = oprot->getTransport()->writeEnd();
  oprot->getTransport()->flush();

  if (this->eventHandler_.get() != NULL) {
    this->eventHandler_->postWrite(ctx, "MilvusService.BuildIndex", bytes);
  }
}

void MilvusServiceProcessor::process_AddVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
{
  void* ctx = NULL;
@ -3894,6 +4193,88 @@ void MilvusServiceConcurrentClient::recv_DeleteTable(const int32_t seqid)
  } // end while(true)
}

void MilvusServiceConcurrentClient::BuildIndex(const std::string& table_name)
{
  int32_t seqid = send_BuildIndex(table_name);
  recv_BuildIndex(seqid);
}

int32_t MilvusServiceConcurrentClient::send_BuildIndex(const std::string& table_name)
{
  int32_t cseqid = this->sync_.generateSeqId();
  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
  oprot_->writeMessageBegin("BuildIndex", ::apache::thrift::protocol::T_CALL, cseqid);

  MilvusService_BuildIndex_pargs args;
  args.table_name = &table_name;
  args.write(oprot_);

  oprot_->writeMessageEnd();
  oprot_->getTransport()->writeEnd();
  oprot_->getTransport()->flush();

  sentry.commit();
  return cseqid;
}

void MilvusServiceConcurrentClient::recv_BuildIndex(const int32_t seqid)
{

  int32_t rseqid = 0;
  std::string fname;
  ::apache::thrift::protocol::TMessageType mtype;

  // the read mutex gets dropped and reacquired as part of waitForWork()
  // The destructor of this sentry wakes up other clients
  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);

  while(true) {
    if(!this->sync_.getPending(fname, mtype, rseqid)) {
      iprot_->readMessageBegin(fname, mtype, rseqid);
    }
    if(seqid == rseqid) {
      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
        ::apache::thrift::TApplicationException x;
        x.read(iprot_);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
        sentry.commit();
        throw x;
      }
      if (mtype != ::apache::thrift::protocol::T_REPLY) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
      if (fname.compare("BuildIndex") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();

        // in a bad state, don't commit
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
      MilvusService_BuildIndex_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();

      if (result.__isset.e) {
        sentry.commit();
        throw result.e;
      }
      sentry.commit();
      return;
    }
    // seqid != rseqid
    this->sync_.updatePending(fname, mtype, rseqid);

    // this will temporarily unlock the readMutex, and let other clients get work done
    this->sync_.waitForWork(seqid);
  } // end while(true)
}

void MilvusServiceConcurrentClient::AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array)
{
  int32_t seqid = send_AddVector(table_name, record_array);

@ -58,6 +58,18 @@ class MilvusServiceIf {
|
||||
*/
|
||||
virtual void DeleteTable(const std::string& table_name) = 0;
|
||||
|
||||
/**
|
||||
* @brief Build index by table method
|
||||
*
|
||||
* This method is used to build index by table in sync mode.
|
||||
*
|
||||
* @param table_name, table is going to be built index.
|
||||
*
|
||||
*
|
||||
* @param table_name
|
||||
*/
|
||||
virtual void BuildIndex(const std::string& table_name) = 0;
|
||||
|
||||
/**
|
||||
* @brief Add vector array to table
|
||||
*
|
||||
@ -197,6 +209,9 @@ class MilvusServiceNull : virtual public MilvusServiceIf {
|
||||
void DeleteTable(const std::string& /* table_name */) {
|
||||
return;
|
||||
}
|
||||
void BuildIndex(const std::string& /* table_name */) {
|
||||
return;
|
||||
}
|
||||
void AddVector(std::vector<int64_t> & /* _return */, const std::string& /* table_name */, const std::vector<RowRecord> & /* record_array */) {
|
||||
return;
|
||||
}
|
||||
@ -541,6 +556,110 @@ class MilvusService_DeleteTable_presult {
|
||||
|
||||
};
|
||||
|
||||
typedef struct _MilvusService_BuildIndex_args__isset {
|
||||
_MilvusService_BuildIndex_args__isset() : table_name(false) {}
|
||||
bool table_name :1;
|
||||
} _MilvusService_BuildIndex_args__isset;
|
||||
|
||||
class MilvusService_BuildIndex_args {
|
||||
public:
|
||||
|
||||
MilvusService_BuildIndex_args(const MilvusService_BuildIndex_args&);
|
||||
MilvusService_BuildIndex_args& operator=(const MilvusService_BuildIndex_args&);
|
||||
MilvusService_BuildIndex_args() : table_name() {
|
||||
}
|
||||
|
||||
virtual ~MilvusService_BuildIndex_args() throw();
|
||||
std::string table_name;
|
||||
|
||||
_MilvusService_BuildIndex_args__isset __isset;
|
||||
|
||||
void __set_table_name(const std::string& val);
|
||||
|
||||
bool operator == (const MilvusService_BuildIndex_args & rhs) const
|
||||
{
|
||||
if (!(table_name == rhs.table_name))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
bool operator != (const MilvusService_BuildIndex_args &rhs) const {
|
||||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
bool operator < (const MilvusService_BuildIndex_args & ) const;
|
||||
|
||||
uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
|
||||
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
|
||||
|
||||
};
|
||||
|
||||
|
||||
class MilvusService_BuildIndex_pargs {
|
||||
public:
|
||||
|
||||
|
||||
virtual ~MilvusService_BuildIndex_pargs() throw();
|
||||
const std::string* table_name;
|
||||
|
||||
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
|
||||
|
||||
};
|
||||
|
||||
typedef struct _MilvusService_BuildIndex_result__isset {
|
||||
_MilvusService_BuildIndex_result__isset() : e(false) {}
|
||||
bool e :1;
|
||||
} _MilvusService_BuildIndex_result__isset;
|
||||
|
||||
class MilvusService_BuildIndex_result {
|
||||
public:
|
||||
|
||||
MilvusService_BuildIndex_result(const MilvusService_BuildIndex_result&);
|
||||
MilvusService_BuildIndex_result& operator=(const MilvusService_BuildIndex_result&);
|
||||
MilvusService_BuildIndex_result() {
|
||||
}
|
||||
|
||||
virtual ~MilvusService_BuildIndex_result() throw();
|
||||
Exception e;
|
||||
|
||||
_MilvusService_BuildIndex_result__isset __isset;
|
||||
|
||||
void __set_e(const Exception& val);
|
||||
|
||||
bool operator == (const MilvusService_BuildIndex_result & rhs) const
|
||||
{
|
||||
if (!(e == rhs.e))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
bool operator != (const MilvusService_BuildIndex_result &rhs) const {
|
||||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
bool operator < (const MilvusService_BuildIndex_result & ) const;
|
||||
|
||||
uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
|
||||
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
|
||||
|
||||
};
|
||||
|
||||
typedef struct _MilvusService_BuildIndex_presult__isset {
|
||||
_MilvusService_BuildIndex_presult__isset() : e(false) {}
|
||||
bool e :1;
|
||||
} _MilvusService_BuildIndex_presult__isset;
|
||||
|
||||
class MilvusService_BuildIndex_presult {
|
||||
public:
|
||||
|
||||
|
||||
virtual ~MilvusService_BuildIndex_presult() throw();
|
||||
Exception e;
|
||||
|
||||
_MilvusService_BuildIndex_presult__isset __isset;
|
||||
|
||||
uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
|
||||
|
||||
};
|
||||
|
||||
typedef struct _MilvusService_AddVector_args__isset {
|
||||
_MilvusService_AddVector_args__isset() : table_name(false), record_array(false) {}
bool table_name :1;
@ -1403,6 +1522,9 @@ class MilvusServiceClient : virtual public MilvusServiceIf {
void DeleteTable(const std::string& table_name);
void send_DeleteTable(const std::string& table_name);
void recv_DeleteTable();
void BuildIndex(const std::string& table_name);
void send_BuildIndex(const std::string& table_name);
void recv_BuildIndex();
void AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array);
void send_AddVector(const std::string& table_name, const std::vector<RowRecord> & record_array);
void recv_AddVector(std::vector<int64_t> & _return);
@ -1442,6 +1564,7 @@ class MilvusServiceProcessor : public ::apache::thrift::TDispatchProcessor {
void process_CreateTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_HasTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_DeleteTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_BuildIndex(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_AddVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_SearchVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_SearchVectorInFiles(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@ -1455,6 +1578,7 @@ class MilvusServiceProcessor : public ::apache::thrift::TDispatchProcessor {
processMap_["CreateTable"] = &MilvusServiceProcessor::process_CreateTable;
processMap_["HasTable"] = &MilvusServiceProcessor::process_HasTable;
processMap_["DeleteTable"] = &MilvusServiceProcessor::process_DeleteTable;
processMap_["BuildIndex"] = &MilvusServiceProcessor::process_BuildIndex;
processMap_["AddVector"] = &MilvusServiceProcessor::process_AddVector;
processMap_["SearchVector"] = &MilvusServiceProcessor::process_SearchVector;
processMap_["SearchVectorInFiles"] = &MilvusServiceProcessor::process_SearchVectorInFiles;
@ -1517,6 +1641,15 @@ class MilvusServiceMultiface : virtual public MilvusServiceIf {
ifaces_[i]->DeleteTable(table_name);
}

void BuildIndex(const std::string& table_name) {
size_t sz = ifaces_.size();
size_t i = 0;
for (; i < (sz - 1); ++i) {
ifaces_[i]->BuildIndex(table_name);
}
ifaces_[i]->BuildIndex(table_name);
}

void AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array) {
size_t sz = ifaces_.size();
size_t i = 0;
@ -1625,6 +1758,9 @@ class MilvusServiceConcurrentClient : virtual public MilvusServiceIf {
void DeleteTable(const std::string& table_name);
int32_t send_DeleteTable(const std::string& table_name);
void recv_DeleteTable(const int32_t seqid);
void BuildIndex(const std::string& table_name);
int32_t send_BuildIndex(const std::string& table_name);
void recv_BuildIndex(const int32_t seqid);
void AddVector(std::vector<int64_t> & _return, const std::string& table_name, const std::vector<RowRecord> & record_array);
int32_t send_AddVector(const std::string& table_name, const std::vector<RowRecord> & record_array);
void recv_AddVector(std::vector<int64_t> & _return, const int32_t seqid);

@ -65,6 +65,21 @@ class MilvusServiceHandler : virtual public MilvusServiceIf {
printf("DeleteTable\n");
}

/**
 * @brief Build index by table method
 *
 * This method is used to build index for a table in sync mode.
 *
 * @param table_name, the table for which the index is to be built.
 *
 *
 * @param table_name
 */
void BuildIndex(const std::string& table_name) {
// Your implementation goes here
printf("BuildIndex\n");
}

/**
 * @brief Add vector array to table
 *

@ -34,7 +34,8 @@ int _kErrorCodeValues[] = {
ErrorCode::CANNOT_CREATE_FOLDER,
ErrorCode::CANNOT_CREATE_FILE,
ErrorCode::CANNOT_DELETE_FOLDER,
ErrorCode::CANNOT_DELETE_FILE
ErrorCode::CANNOT_DELETE_FILE,
ErrorCode::BUILD_INDEX_ERROR
};
const char* _kErrorCodeNames[] = {
"SUCCESS",
@ -57,9 +58,10 @@ const char* _kErrorCodeNames[] = {
"CANNOT_CREATE_FOLDER",
"CANNOT_CREATE_FILE",
"CANNOT_DELETE_FOLDER",
"CANNOT_DELETE_FILE"
"CANNOT_DELETE_FILE",
"BUILD_INDEX_ERROR"
};
const std::map<int, const char*> _ErrorCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(21, _kErrorCodeValues, _kErrorCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
const std::map<int, const char*> _ErrorCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(22, _kErrorCodeValues, _kErrorCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));

std::ostream& operator<<(std::ostream& out, const ErrorCode::type& val) {
std::map<int, const char*>::const_iterator it = _ErrorCode_VALUES_TO_NAMES.find(val);

@ -42,7 +42,8 @@ struct ErrorCode {
CANNOT_CREATE_FOLDER = 17,
CANNOT_CREATE_FILE = 18,
CANNOT_DELETE_FOLDER = 19,
CANNOT_DELETE_FILE = 20
CANNOT_DELETE_FILE = 20,
BUILD_INDEX_ERROR = 21
};
};


@ -35,6 +35,7 @@ enum ErrorCode {
CANNOT_CREATE_FILE,
CANNOT_DELETE_FOLDER,
CANNOT_DELETE_FILE,
BUILD_INDEX_ERROR,
}

exception Exception {
@ -115,6 +116,16 @@ service MilvusService {
*/
void DeleteTable(2: string table_name) throws(1: Exception e);

/**
 * @brief Build index by table method
 *
 * This method is used to build index for a table in sync mode.
 *
 * @param table_name, the table for which the index is to be built.
 *
 */
void BuildIndex(2: string table_name) throws(1: Exception e);


/**
 * @brief Add vector array to table
@ -207,4 +218,4 @@ service MilvusService {
 * @return Server status.
 */
string Ping(2: string cmd) throws(1: Exception e);
}
}

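For reference, this is roughly how the new BuildIndex RPC can be driven end to end through the generated C++ client. This is a sketch only: the host, port, and table name are placeholders, namespaces are omitted, and the smart-pointer flavor expected by the generated constructor depends on the Thrift release in use.

```cpp
// Sketch: call the new synchronous BuildIndex RPC via the generated client.
#include <memory>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "MilvusService.h"  // generated from milvus.thrift

using namespace apache::thrift;

int main() {
    auto socket    = std::make_shared<transport::TSocket>("localhost", 19530);  // placeholder endpoint
    auto transport = std::make_shared<transport::TBufferedTransport>(socket);
    auto protocol  = std::make_shared<protocol::TBinaryProtocol>(transport);
    MilvusServiceClient client(protocol);

    transport->open();
    try {
        client.BuildIndex("example_table");  // sync call; blocks until the server finishes
    } catch (const Exception &e) {
        // A server-side failure surfaces here with an ErrorCode
        // such as BUILD_INDEX_ERROR from the enum added above.
    }
    transport->close();
    return 0;
}
```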
@ -32,27 +32,26 @@ namespace server {

namespace fs = boost::filesystem;

bool CommonUtil::GetSystemMemInfo(unsigned long &totalMem, unsigned long &freeMem) {
bool CommonUtil::GetSystemMemInfo(unsigned long &total_mem, unsigned long &free_mem) {
struct sysinfo info;
int ret = sysinfo(&info);
totalMem = info.totalram;
freeMem = info.freeram;
total_mem = info.totalram;
free_mem = info.freeram;

return ret == 0;//succeed 0, failed -1
}

bool CommonUtil::GetSystemAvailableThreads(unsigned int &threadCnt) {
bool CommonUtil::GetSystemAvailableThreads(unsigned int &thread_count) {
//threadCnt = std::thread::hardware_concurrency();
threadCnt = sysconf(_SC_NPROCESSORS_CONF);
threadCnt *= THREAD_MULTIPLY_CPU;
if (threadCnt == 0)
threadCnt = 8;
thread_count = sysconf(_SC_NPROCESSORS_CONF);
thread_count *= THREAD_MULTIPLY_CPU;
if (thread_count == 0)
thread_count = 8;

return true;
}

bool CommonUtil::IsDirectoryExist(const std::string &path)
{
bool CommonUtil::IsDirectoryExist(const std::string &path) {
DIR *dp = nullptr;
if ((dp = opendir(path.c_str())) == nullptr) {
return false;
@ -63,9 +62,13 @@ bool CommonUtil::IsDirectoryExist(const std::string &path)
}

ServerError CommonUtil::CreateDirectory(const std::string &path) {
struct stat directoryStat;
int statOK = stat(path.c_str(), &directoryStat);
if (statOK == 0) {
if(path.empty()) {
return SERVER_SUCCESS;
}

struct stat directory_stat;
int status = stat(path.c_str(), &directory_stat);
if (status == 0) {
return SERVER_SUCCESS;//already exist
}

@ -76,8 +79,8 @@ ServerError CommonUtil::CreateDirectory(const std::string &path) {
return err;
}

statOK = stat(path.c_str(), &directoryStat);
if (statOK == 0) {
status = stat(path.c_str(), &directory_stat);
if (status == 0) {
return SERVER_SUCCESS;//already exist
}

@ -91,37 +94,41 @@ ServerError CommonUtil::CreateDirectory(const std::string &path) {

namespace {
void RemoveDirectory(const std::string &path) {
DIR *pDir = NULL;
DIR *dir = nullptr;
struct dirent *dmsg;
char szFileName[256];
char szFolderName[256];
char file_name[256];
char folder_name[256];

strcpy(szFolderName, path.c_str());
strcat(szFolderName, "/%s");
if ((pDir = opendir(path.c_str())) != NULL) {
while ((dmsg = readdir(pDir)) != NULL) {
strcpy(folder_name, path.c_str());
strcat(folder_name, "/%s");
if ((dir = opendir(path.c_str())) != nullptr) {
while ((dmsg = readdir(dir)) != nullptr) {
if (strcmp(dmsg->d_name, ".") != 0
&& strcmp(dmsg->d_name, "..") != 0) {
sprintf(szFileName, szFolderName, dmsg->d_name);
std::string tmp = szFileName;
sprintf(file_name, folder_name, dmsg->d_name);
std::string tmp = file_name;
if (tmp.find(".") == std::string::npos) {
RemoveDirectory(szFileName);
RemoveDirectory(file_name);
}
remove(szFileName);
remove(file_name);
}
}
}

if (pDir != NULL) {
closedir(pDir);
if (dir != nullptr) {
closedir(dir);
}
remove(path.c_str());
}
}

ServerError CommonUtil::DeleteDirectory(const std::string &path) {
struct stat directoryStat;
int statOK = stat(path.c_str(), &directoryStat);
if(path.empty()) {
return SERVER_SUCCESS;
}

struct stat directory_stat;
int statOK = stat(path.c_str(), &directory_stat);
if (statOK != 0)
return SERVER_SUCCESS;

@ -133,6 +140,15 @@ bool CommonUtil::IsFileExist(const std::string &path) {
return (access(path.c_str(), F_OK) == 0);
}

uint64_t CommonUtil::GetFileSize(const std::string &path) {
struct stat file_info;
if (stat(path.c_str(), &file_info) < 0) {
return 0;
} else {
return (uint64_t)file_info.st_size;
}
}

std::string CommonUtil::GetExePath() {
const size_t buf_len = 1024;
char buf[buf_len];

@ -16,10 +16,11 @@ namespace server {

class CommonUtil {
public:
static bool GetSystemMemInfo(unsigned long &totalMem, unsigned long &freeMem);
static bool GetSystemAvailableThreads(unsigned int &threadCnt);
static bool GetSystemMemInfo(unsigned long &total_mem, unsigned long &free_mem);
static bool GetSystemAvailableThreads(unsigned int &thread_count);

static bool IsFileExist(const std::string &path);
static uint64_t GetFileSize(const std::string &path);
static bool IsDirectoryExist(const std::string &path);
static ServerError CreateDirectory(const std::string &path);
static ServerError DeleteDirectory(const std::string &path);

@ -35,6 +35,7 @@ constexpr ServerError SERVER_CANNOT_CREATE_FOLDER = ToGlobalServerErrorCode(8);
constexpr ServerError SERVER_CANNOT_CREATE_FILE = ToGlobalServerErrorCode(9);
constexpr ServerError SERVER_CANNOT_DELETE_FOLDER = ToGlobalServerErrorCode(10);
constexpr ServerError SERVER_CANNOT_DELETE_FILE = ToGlobalServerErrorCode(11);
constexpr ServerError SERVER_BUILD_INDEX_ERROR = ToGlobalServerErrorCode(12);

constexpr ServerError SERVER_TABLE_NOT_EXIST = ToGlobalServerErrorCode(100);
constexpr ServerError SERVER_INVALID_TABLE_NAME = ToGlobalServerErrorCode(101);
@ -77,4 +78,3 @@ private:
} // namespace server
} // namespace milvus
} // namespace zilliz


@ -27,6 +27,10 @@ void StringHelpFunctions::TrimStringQuote(std::string &string, const std::string
ServerError StringHelpFunctions::SplitStringByDelimeter(const std::string &str,
const std::string &delimeter,
std::vector<std::string> &result) {
if(str.empty()) {
return SERVER_SUCCESS;
}

size_t last = 0;
size_t index = str.find_first_of(delimeter, last);
while (index != std::string::npos) {

74
cpp/src/utils/ValidationUtil.cpp
Normal file
@ -0,0 +1,74 @@
#include <src/db/ExecutionEngine.h>
#include "ValidationUtil.h"
#include "Log.h"


namespace zilliz {
namespace milvus {
namespace server {

constexpr size_t table_name_size_limit = 255;
constexpr int64_t table_dimension_limit = 16384;

ServerError
ValidateTableName(const std::string &table_name) {

// Table name shouldn't be empty.
if (table_name.empty()) {
SERVER_LOG_ERROR << "Empty table name";
return SERVER_INVALID_TABLE_NAME;
}

// Table name size shouldn't exceed 255 characters.
if (table_name.size() > table_name_size_limit) {
SERVER_LOG_ERROR << "Table name size exceeds the limitation";
return SERVER_INVALID_TABLE_NAME;
}

// Table name's first character should be an underscore or a letter.
char first_char = table_name[0];
if (first_char != '_' && std::isalpha(first_char) == 0) {
SERVER_LOG_ERROR << "Table name first character isn't underscore or letter: " << first_char;
return SERVER_INVALID_TABLE_NAME;
}

int64_t table_name_size = table_name.size();
for (int64_t i = 1; i < table_name_size; ++i) {
char name_char = table_name[i];
if (name_char != '_' && std::isalnum(name_char) == 0) {
SERVER_LOG_ERROR << "Table name character isn't underscore or alphanumeric: " << name_char;
return SERVER_INVALID_TABLE_NAME;
}
}

return SERVER_SUCCESS;
}

ServerError
ValidateTableDimension(int64_t dimension) {
if (dimension <= 0 || dimension > table_dimension_limit) {
SERVER_LOG_ERROR << "Table dimension exceeds the limitation: " << table_dimension_limit;
return SERVER_INVALID_VECTOR_DIMENSION;
} else {
return SERVER_SUCCESS;
}
}

ServerError
ValidateTableIndexType(int32_t index_type) {
auto engine_type = engine::EngineType(index_type);
switch (engine_type) {
case engine::EngineType::FAISS_IDMAP:
case engine::EngineType::FAISS_IVFFLAT: {
SERVER_LOG_DEBUG << "Index type: " << index_type;
return SERVER_SUCCESS;
}
default: {
return SERVER_INVALID_INDEX_TYPE;
}
}
}

}
}
}
20
cpp/src/utils/ValidationUtil.h
Normal file
@ -0,0 +1,20 @@
#pragma once

#include "Error.h"

namespace zilliz {
namespace milvus {
namespace server {

ServerError
ValidateTableName(const std::string& table_name);

ServerError
ValidateTableDimension(int64_t dimension);

ServerError
ValidateTableIndexType(int32_t index_type);

}
}
}
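Taken together, these helpers give request handlers a single validation gate for user input. A minimal sketch of how a CreateTable-style handler might chain them — the function name, parameters, and include paths here are illustrative, not code from this change:

```cpp
#include <cstdint>
#include <string>
#include "utils/ValidationUtil.h"  // assumed include path
#include "utils/Error.h"

using namespace zilliz::milvus::server;

// Hypothetical handler-side check: validate every user-supplied field and
// return the first failing ServerError before touching the engine.
ServerError CheckCreateTableRequest(const std::string &table_name,
                                    int64_t dimension,
                                    int32_t index_type) {
    ServerError err = ValidateTableName(table_name);
    if (err != SERVER_SUCCESS) return err;

    err = ValidateTableDimension(dimension);
    if (err != SERVER_SUCCESS) return err;

    return ValidateTableIndexType(index_type);
}
```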
@ -71,7 +71,8 @@ Index_ptr IndexBuilder::build_all(const long &nb,
{
LOG(DEBUG) << "Build index by GPU";
// TODO: list support index-type.
faiss::Index *ori_index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str());
faiss::MetricType metric_type = opd_->metric_type == "L2" ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
faiss::Index *ori_index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type);

std::lock_guard<std::mutex> lk(gpu_resource);
faiss::gpu::StandardGpuResources res;
@ -90,7 +91,8 @@ Index_ptr IndexBuilder::build_all(const long &nb,
#else
{
LOG(DEBUG) << "Build index by CPU";
faiss::Index *index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str());
faiss::MetricType metric_type = opd_->metric_type == "L2" ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
faiss::Index *index = faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type);
if (!index->is_trained) {
nt == 0 || xt == nullptr ? index->train(nb, xb)
: index->train(nt, xt);
@ -113,7 +115,8 @@ BgCpuBuilder::BgCpuBuilder(const zilliz::milvus::engine::Operand_ptr &opd) : Ind

Index_ptr BgCpuBuilder::build_all(const long &nb, const float *xb, const long *ids, const long &nt, const float *xt) {
std::shared_ptr<faiss::Index> index = nullptr;
index.reset(faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str()));
faiss::MetricType metric_type = opd_->metric_type == "L2" ? faiss::METRIC_L2 : faiss::METRIC_INNER_PRODUCT;
index.reset(faiss::index_factory(opd_->d, opd_->get_index_type(nb).c_str(), metric_type));

LOG(DEBUG) << "Build index by CPU";
{

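The patch switches every call site to the three-argument form of faiss::index_factory so the metric is chosen per table rather than defaulting to L2. A standalone sketch of that overload — the dimension and description string are placeholders mirroring the builder code above, and the header location varies by FAISS release:

```cpp
#include <faiss/index_factory.h>  // declared in faiss/AutoTune.h on older FAISS releases

int main() {
    // Build an inner-product IVF+SQ8 index; 256 and "IVF16384,SQ8" are
    // illustrative values of opd_->d and get_index_type(nb).
    faiss::Index *index =
        faiss::index_factory(256, "IVF16384,SQ8", faiss::METRIC_INNER_PRODUCT);
    // ... train / add / search ...
    delete index;
    return 0;
}
```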
@ -4,6 +4,7 @@
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////

#include "src/server/ServerConfig.h"
#include "Operand.h"


@ -16,12 +17,14 @@ using std::string;
enum IndexType {
Invalid_Option = 0,
IVF = 1,
IDMAP = 2
IDMAP = 2,
IVFSQ8 = 3,
};

IndexType resolveIndexType(const string &index_type) {
if (index_type == "IVF") { return IndexType::IVF; }
if (index_type == "IDMap") { return IndexType::IDMAP; }
if (index_type == "IVFSQ8") { return IndexType::IVFSQ8; }
return IndexType::Invalid_Option;
}

@ -29,27 +32,44 @@ IndexType resolveIndexType(const string &index_type) {
string Operand::get_index_type(const int &nb) {
if (!index_str.empty()) { return index_str; }

// TODO: support OPQ or ...
if (!preproc.empty()) { index_str += (preproc + ","); }

switch (resolveIndexType(index_type)) {
case Invalid_Option: {
// TODO: add exception
break;
}
case IVF: {

using namespace zilliz::milvus::server;
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
size_t nlist = engine_config.GetInt32Value(CONFIG_NLIST, 16384);

index_str += (ncent != 0 ? index_type + std::to_string(ncent) :
index_type + std::to_string(int(nb / 1000000.0 * 16384)));
index_type + std::to_string(int(nb / 1000000.0 * nlist)));
// std::cout<<"nlist = "<<nlist<<std::endl;
if (!postproc.empty()) { index_str += ("," + postproc); }
break;
}
case IVFSQ8: {

using namespace zilliz::milvus::server;
ServerConfig &config = ServerConfig::GetInstance();
ConfigNode engine_config = config.GetConfig(CONFIG_ENGINE);
size_t nlist = engine_config.GetInt32Value(CONFIG_NLIST, 16384);

index_str += (ncent != 0 ? "IVF" + std::to_string(ncent) :
"IVF" + std::to_string(int(nb / 1000000.0 * nlist)));
index_str += ",SQ8";
// std::cout<<"nlist = "<<nlist<<std::endl;
break;
}
case IDMAP: {
index_str += index_type;
if (!postproc.empty()) { index_str += ("," + postproc); }
break;
}
}

// TODO: support PQ or ...
if (!postproc.empty()) { index_str += ("," + postproc); }
return index_str;
}


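To make the nlist scaling above concrete, here is a standalone re-derivation of the factory strings this function produces. The collection size is illustrative; the nlist default matches the GetInt32Value fallback in the code:

```cpp
#include <iostream>
#include <string>

int main() {
    long nb = 2000000;     // illustrative collection size: 2M vectors
    size_t nlist = 16384;  // CONFIG_NLIST default used above

    // Same arithmetic as get_index_type(): nlist is scaled per million vectors.
    std::string ivf = "IVF" + std::to_string(int(nb / 1000000.0 * nlist));
    std::cout << ivf << "\n";            // prints: IVF32768
    std::cout << ivf + ",SQ8" << "\n";   // IVFSQ8 case: IVF32768,SQ8
    return 0;
}
```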
@ -12,7 +12,6 @@ aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)

set(unittest_srcs
${CMAKE_CURRENT_SOURCE_DIR}/main.cpp)
#${EASYLOGGINGPP_INCLUDE_DIR}/easylogging++.cc)

set(require_files
${MILVUS_ENGINE_SRC}/server/ServerConfig.cpp
@ -44,4 +43,5 @@ add_subdirectory(db)
add_subdirectory(faiss_wrapper)
#add_subdirectory(license)
add_subdirectory(metrics)
add_subdirectory(storage)
add_subdirectory(storage)
add_subdirectory(utils)
@ -23,6 +23,8 @@ link_directories("/usr/local/cuda/lib64")

include_directories(/usr/include/mysql)

#add_definitions(-DBOOST_ERROR_CODE_HEADER_ONLY)

set(db_test_src
#${unittest_srcs}
${config_files}
@ -40,9 +42,9 @@ set(db_libs
faiss
cudart
cublas
sqlite3
boost_system
boost_filesystem
sqlite
boost_system_static
boost_filesystem_static
lz4
mysqlpp
)

@ -3,17 +3,20 @@
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <thread>
#include <easylogging++.h>
#include <boost/filesystem.hpp>

#include "utils.h"
#include "db/DB.h"
#include "db/DBImpl.h"
#include "db/MetaConsts.h"
#include "db/Factories.h"

#include <gtest/gtest.h>
#include <easylogging++.h>

#include <boost/filesystem.hpp>

#include <thread>
#include <random>

using namespace zilliz::milvus;

namespace {
@ -272,7 +275,9 @@ TEST_F(DBTest2, DELETE_TEST) {
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);

ASSERT_TRUE(boost::filesystem::exists(table_info_get.location_));
bool has_table = false;
db_->HasTable(TABLE_NAME, has_table);
ASSERT_TRUE(has_table);

engine::IDNumbers vector_ids;

@ -293,5 +298,7 @@ TEST_F(DBTest2, DELETE_TEST) {
stat = db_->DeleteTable(TABLE_NAME, dates);
std::this_thread::sleep_for(std::chrono::seconds(2));
ASSERT_TRUE(stat.ok());
ASSERT_FALSE(boost::filesystem::exists(table_info_get.location_));

db_->HasTable(TABLE_NAME, has_table);
ASSERT_FALSE(has_table);
};

372
cpp/unittest/db/mem_test.cpp
Normal file
@ -0,0 +1,372 @@
#include "gtest/gtest.h"

#include "db/VectorSource.h"
#include "db/MemTableFile.h"
#include "db/MemTable.h"
#include "utils.h"
#include "db/Factories.h"
#include "db/Constants.h"
#include "db/EngineFactory.h"
#include "metrics/Metrics.h"
#include "db/MetaConsts.h"
#include "boost/filesystem.hpp"

#include <thread>
#include <fstream>
#include <iostream>
#include <cmath>
#include <random>

using namespace zilliz::milvus;

namespace {

static const std::string TABLE_NAME = "test_group";
static constexpr int64_t TABLE_DIM = 256;
static constexpr int64_t VECTOR_COUNT = 250000;
static constexpr int64_t INSERT_LOOP = 10000;

engine::meta::TableSchema BuildTableSchema() {
engine::meta::TableSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.table_id_ = TABLE_NAME;
table_info.engine_type_ = (int) engine::EngineType::FAISS_IDMAP;
return table_info;
}

void BuildVectors(int64_t n, std::vector<float> &vectors) {
vectors.clear();
vectors.resize(n * TABLE_DIM);
float *data = vectors.data();
for (int i = 0; i < n; i++) {
for (int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48();
data[TABLE_DIM * i] += i / 2000.;
}
}
}

TEST_F(NewMemManagerTest, VECTOR_SOURCE_TEST) {

std::shared_ptr<engine::meta::DBMetaImpl> impl_ = engine::DBMetaImplFactory::Build();

engine::meta::TableSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
ASSERT_TRUE(status.ok());

engine::meta::TableFileSchema table_file_schema;
table_file_schema.table_id_ = TABLE_NAME;
status = impl_->CreateTableFile(table_file_schema);
ASSERT_TRUE(status.ok());

int64_t n = 100;
std::vector<float> vectors;
BuildVectors(n, vectors);

engine::VectorSource source(n, vectors.data());

size_t num_vectors_added;
engine::ExecutionEnginePtr execution_engine_ = engine::EngineFactory::Build(table_file_schema.dimension_,
table_file_schema.location_,
(engine::EngineType) table_file_schema.engine_type_);
status = source.Add(execution_engine_, table_file_schema, 50, num_vectors_added);
ASSERT_TRUE(status.ok());

ASSERT_EQ(num_vectors_added, 50);

engine::IDNumbers vector_ids = source.GetVectorIds();
ASSERT_EQ(vector_ids.size(), 50);

status = source.Add(execution_engine_, table_file_schema, 60, num_vectors_added);
ASSERT_TRUE(status.ok());

ASSERT_EQ(num_vectors_added, 50);

vector_ids = source.GetVectorIds();
ASSERT_EQ(vector_ids.size(), 100);

status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}

TEST_F(NewMemManagerTest, MEM_TABLE_FILE_TEST) {

std::shared_ptr<engine::meta::DBMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
auto options = engine::OptionsFactory::Build();

engine::meta::TableSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
ASSERT_TRUE(status.ok());

engine::MemTableFile mem_table_file(TABLE_NAME, impl_, options);

int64_t n_100 = 100;
std::vector<float> vectors_100;
BuildVectors(n_100, vectors_100);

engine::VectorSource::Ptr source = std::make_shared<engine::VectorSource>(n_100, vectors_100.data());

status = mem_table_file.Add(source);
ASSERT_TRUE(status.ok());

// std::cout << mem_table_file.GetCurrentMem() << " " << mem_table_file.GetMemLeft() << std::endl;

engine::IDNumbers vector_ids = source->GetVectorIds();
ASSERT_EQ(vector_ids.size(), 100);

size_t singleVectorMem = sizeof(float) * TABLE_DIM;
ASSERT_EQ(mem_table_file.GetCurrentMem(), n_100 * singleVectorMem);

int64_t n_max = engine::MAX_TABLE_FILE_MEM / singleVectorMem;
std::vector<float> vectors_128M;
BuildVectors(n_max, vectors_128M);

engine::VectorSource::Ptr source_128M = std::make_shared<engine::VectorSource>(n_max, vectors_128M.data());
status = mem_table_file.Add(source_128M);

vector_ids = source_128M->GetVectorIds();
ASSERT_EQ(vector_ids.size(), n_max - n_100);

ASSERT_TRUE(mem_table_file.IsFull());

status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}

TEST_F(NewMemManagerTest, MEM_TABLE_TEST) {

std::shared_ptr<engine::meta::DBMetaImpl> impl_ = engine::DBMetaImplFactory::Build();
auto options = engine::OptionsFactory::Build();

engine::meta::TableSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
ASSERT_TRUE(status.ok());

int64_t n_100 = 100;
std::vector<float> vectors_100;
BuildVectors(n_100, vectors_100);

engine::VectorSource::Ptr source_100 = std::make_shared<engine::VectorSource>(n_100, vectors_100.data());

engine::MemTable mem_table(TABLE_NAME, impl_, options);

status = mem_table.Add(source_100);
ASSERT_TRUE(status.ok());
engine::IDNumbers vector_ids = source_100->GetVectorIds();
ASSERT_EQ(vector_ids.size(), 100);

engine::MemTableFile::Ptr mem_table_file;
mem_table.GetCurrentMemTableFile(mem_table_file);
size_t singleVectorMem = sizeof(float) * TABLE_DIM;
ASSERT_EQ(mem_table_file->GetCurrentMem(), n_100 * singleVectorMem);

int64_t n_max = engine::MAX_TABLE_FILE_MEM / singleVectorMem;
std::vector<float> vectors_128M;
BuildVectors(n_max, vectors_128M);

engine::VectorSource::Ptr source_128M = std::make_shared<engine::VectorSource>(n_max, vectors_128M.data());
status = mem_table.Add(source_128M);
ASSERT_TRUE(status.ok());

vector_ids = source_128M->GetVectorIds();
ASSERT_EQ(vector_ids.size(), n_max);

mem_table.GetCurrentMemTableFile(mem_table_file);
ASSERT_EQ(mem_table_file->GetCurrentMem(), n_100 * singleVectorMem);

ASSERT_EQ(mem_table.GetTableFileCount(), 2);

int64_t n_1G = 1024000;
std::vector<float> vectors_1G;
BuildVectors(n_1G, vectors_1G);

engine::VectorSource::Ptr source_1G = std::make_shared<engine::VectorSource>(n_1G, vectors_1G.data());

status = mem_table.Add(source_1G);
ASSERT_TRUE(status.ok());

vector_ids = source_1G->GetVectorIds();
ASSERT_EQ(vector_ids.size(), n_1G);

int expectedTableFileCount = 2 + std::ceil((n_1G - n_100) * singleVectorMem / engine::MAX_TABLE_FILE_MEM);
ASSERT_EQ(mem_table.GetTableFileCount(), expectedTableFileCount);

status = mem_table.Serialize();
ASSERT_TRUE(status.ok());

status = impl_->DropAll();
ASSERT_TRUE(status.ok());
}

TEST_F(NewMemManagerTest, SERIAL_INSERT_SEARCH_TEST) {

auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = "sqlite://:@:/";
auto db_ = engine::DBFactory::Build(options);

engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);

engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

std::map<int64_t, std::vector<float>> search_vectors;
{
engine::IDNumbers vector_ids;
int64_t nb = 1024000;
std::vector<float> xb;
BuildVectors(nb, xb);
engine::Status status = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
ASSERT_TRUE(status.ok());

std::this_thread::sleep_for(std::chrono::seconds(3));

std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<int64_t> dis(0, nb - 1);

int64_t num_query = 20;
for (int64_t i = 0; i < num_query; ++i) {
int64_t index = dis(gen);
std::vector<float> search;
for (int64_t j = 0; j < TABLE_DIM; j++) {
search.push_back(xb[index * TABLE_DIM + j]);
}
search_vectors.insert(std::make_pair(vector_ids[index], search));
}
}

int k = 10;
for (auto &pair : search_vectors) {
auto &search = pair.second;
engine::QueryResults results;
stat = db_->Query(TABLE_NAME, k, 1, search.data(), results);
ASSERT_EQ(results[0][0].first, pair.first);
ASSERT_LT(results[0][0].second, 0.00001);
}

delete db_;
boost::filesystem::remove_all(options.meta.path);

}

TEST_F(NewMemManagerTest, INSERT_TEST) {

auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = "sqlite://:@:/";
auto db_ = engine::DBFactory::Build(options);

engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);

engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

auto start_time = METRICS_NOW_TIME;

int insert_loop = 20;
for (int i = 0; i < insert_loop; ++i) {
int64_t nb = 409600;
std::vector<float> xb;
BuildVectors(nb, xb);
engine::IDNumbers vector_ids;
engine::Status status = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
ASSERT_TRUE(status.ok());
}
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
LOG(DEBUG) << "total_time spent in INSERT_TEST (ms) : " << total_time;

delete db_;
boost::filesystem::remove_all(options.meta.path);

}

TEST_F(NewMemManagerTest, CONCURRENT_INSERT_SEARCH_TEST) {

auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = "sqlite://:@:/";
auto db_ = engine::DBFactory::Build(options);

engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);

engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);

engine::IDNumbers vector_ids;
engine::IDNumbers target_ids;

int64_t nb = 409600;
std::vector<float> xb;
BuildVectors(nb, xb);

int64_t qb = 5;
std::vector<float> qxb;
BuildVectors(qb, qxb);

std::thread search([&]() {
engine::QueryResults results;
int k = 10;
std::this_thread::sleep_for(std::chrono::seconds(2));

INIT_TIMER;
std::stringstream ss;
uint64_t count = 0;
uint64_t prev_count = 0;

for (auto j = 0; j < 10; ++j) {
ss.str("");
db_->Size(count);
prev_count = count;

START_TIMER;
stat = db_->Query(TABLE_NAME, k, qb, qxb.data(), results);
ss << "Search " << j << " With Size " << count / engine::meta::M << " M";
STOP_TIMER(ss.str());

ASSERT_STATS(stat);
for (auto k = 0; k < qb; ++k) {
ASSERT_EQ(results[k][0].first, target_ids[k]);
ss.str("");
ss << "Result [" << k << "]:";
for (auto result : results[k]) {
ss << result.first << " ";
}
/* LOG(DEBUG) << ss.str(); */
}
ASSERT_TRUE(count >= prev_count);
std::this_thread::sleep_for(std::chrono::seconds(1));
}
});

int loop = 20;

for (auto i = 0; i < loop; ++i) {
if (i == 0) {
db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
ASSERT_EQ(target_ids.size(), qb);
} else {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
}
std::this_thread::sleep_for(std::chrono::microseconds(1));
}

search.join();

delete db_;
boost::filesystem::remove_all(options.meta.path);

};

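A quick sanity check of the capacity arithmetic these tests rely on. The 128 MB figure is inferred from the vectors_128M naming; the real constant lives in db/Constants.h:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t kTableDim = 256;                           // TABLE_DIM above
    const int64_t kMaxTableFileMem = 128LL * 1024 * 1024;    // assumed 128 MB
    int64_t single_vector_mem = sizeof(float) * kTableDim;   // 4 * 256 = 1024 bytes
    int64_t n_max = kMaxTableFileMem / single_vector_mem;    // 131072 vectors
    std::printf("one table file holds up to %lld vectors\n",
                static_cast<long long>(n_max));
    return 0;
}
```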
@ -38,7 +38,7 @@ TEST_F(MetaTest, TABLE_TEST) {

table.table_id_ = table_id;
status = impl_->CreateTable(table);
ASSERT_TRUE(status.ok());
ASSERT_TRUE(status.IsAlreadyExist());

table.table_id_ = "";
status = impl_->CreateTable(table);

@ -3,17 +3,19 @@
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <thread>
#include <easylogging++.h>
#include <boost/filesystem.hpp>

#include "utils.h"
#include "db/DB.h"
#include "db/DBImpl.h"
#include "db/MetaConsts.h"
#include "db/Factories.h"

#include <gtest/gtest.h>
#include <easylogging++.h>
#include <boost/filesystem.hpp>

#include <thread>
#include <random>

using namespace zilliz::milvus;

namespace {
@ -271,8 +273,9 @@ TEST_F(MySQLDBTest, DELETE_TEST) {
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);

// std::cout << "location: " << table_info_get.location_ << std::endl;
ASSERT_TRUE(boost::filesystem::exists(table_info_get.location_));
bool has_table = false;
db_->HasTable(TABLE_NAME, has_table);
ASSERT_TRUE(has_table);

engine::IDNumbers vector_ids;

@ -295,7 +298,9 @@ TEST_F(MySQLDBTest, DELETE_TEST) {
std::this_thread::sleep_for(std::chrono::seconds(5));
// std::cout << "5 sec finish" << std::endl;
ASSERT_TRUE(stat.ok());
// ASSERT_FALSE(boost::filesystem::exists(table_info_get.location_));

db_->HasTable(TABLE_NAME, has_table);
ASSERT_FALSE(has_table);

delete db_;


@ -6,7 +6,7 @@
#include <gtest/gtest.h>

#include "db/scheduler/task/SearchTask.h"

#include <cmath>
#include <vector>

using namespace zilliz::milvus;
@ -73,13 +73,13 @@ TEST(DBSearchTest, TOPK_TEST) {
ASSERT_EQ(src_result.size(), NQ);

engine::SearchContext::ResultSet target_result;
status = engine::SearchTask::TopkResult(target_result, TOP_K, target_result);
status = engine::SearchTask::TopkResult(target_result, TOP_K, true, target_result);
ASSERT_TRUE(status.ok());

status = engine::SearchTask::TopkResult(target_result, TOP_K, src_result);
status = engine::SearchTask::TopkResult(target_result, TOP_K, true, src_result);
ASSERT_FALSE(status.ok());

status = engine::SearchTask::TopkResult(src_result, TOP_K, target_result);
status = engine::SearchTask::TopkResult(src_result, TOP_K, true, target_result);
ASSERT_TRUE(status.ok());
ASSERT_TRUE(src_result.empty());
ASSERT_EQ(target_result.size(), NQ);
@ -92,7 +92,7 @@ TEST(DBSearchTest, TOPK_TEST) {
status = engine::SearchTask::ClusterResult(src_ids, src_distence, NQ, wrong_topk, src_result);
ASSERT_TRUE(status.ok());

status = engine::SearchTask::TopkResult(src_result, TOP_K, target_result);
status = engine::SearchTask::TopkResult(src_result, TOP_K, true, target_result);
ASSERT_TRUE(status.ok());
for(uint64_t i = 0; i < NQ; i++) {
ASSERT_EQ(target_result[i].size(), TOP_K);
@ -101,7 +101,7 @@ TEST(DBSearchTest, TOPK_TEST) {
wrong_topk = TOP_K + 10;
BuildResult(NQ, wrong_topk, src_ids, src_distence);

status = engine::SearchTask::TopkResult(src_result, TOP_K, target_result);
status = engine::SearchTask::TopkResult(src_result, TOP_K, true, target_result);
ASSERT_TRUE(status.ok());
for(uint64_t i = 0; i < NQ; i++) {
ASSERT_EQ(target_result[i].size(), TOP_K);
@ -126,7 +126,7 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap src = src_result[0];
engine::SearchContext::Id2DistanceMap target = target_result[0];
status = engine::SearchTask::MergeResult(src, target, 10);
status = engine::SearchTask::MergeResult(src, target, 10, true);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), 10);
CheckResult(src_result[0], target_result[0], target);
@ -135,7 +135,7 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap src = src_result[0];
engine::SearchContext::Id2DistanceMap target;
status = engine::SearchTask::MergeResult(src, target, 10);
status = engine::SearchTask::MergeResult(src, target, 10, true);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), src_count);
ASSERT_TRUE(src.empty());
@ -145,7 +145,7 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap src = src_result[0];
engine::SearchContext::Id2DistanceMap target = target_result[0];
status = engine::SearchTask::MergeResult(src, target, 30);
status = engine::SearchTask::MergeResult(src, target, 30, true);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), src_count + target_count);
CheckResult(src_result[0], target_result[0], target);
@ -154,9 +154,9 @@ TEST(DBSearchTest, MERGE_TEST) {
{
engine::SearchContext::Id2DistanceMap target = src_result[0];
engine::SearchContext::Id2DistanceMap src = target_result[0];
status = engine::SearchTask::MergeResult(src, target, 30);
status = engine::SearchTask::MergeResult(src, target, 30, true);
ASSERT_TRUE(status.ok());
ASSERT_EQ(target.size(), src_count + target_count);
CheckResult(src_result[0], target_result[0], target);
}
}
}

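The tests above thread a new bool through TopkResult and MergeResult; given the metric-type work elsewhere in this change, a plausible reading is that it selects ascending (L2) versus descending (inner-product) ordering. A generic illustration of such a flag-driven top-k merge — whether SearchTask implements it exactly this way is an assumption:

```cpp
#include <algorithm>
#include <utility>
#include <vector>

// (id, distance) pairs, mirroring SearchContext::Id2DistanceMap in spirit.
using Id2Dist = std::vector<std::pair<long, double>>;

// Merge src into target, keep the best k entries, and drain src.
// "ascending" decides whether smaller distances (L2) or larger
// similarities (inner product) rank first.
void MergeTopk(Id2Dist &src, Id2Dist &target, size_t k, bool ascending) {
    target.insert(target.end(), src.begin(), src.end());
    std::sort(target.begin(), target.end(),
              [ascending](const std::pair<long, double> &a,
                          const std::pair<long, double> &b) {
                  return ascending ? a.second < b.second : a.second > b.second;
              });
    if (target.size() > k) {
        target.resize(k);
    }
    src.clear();
}
```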
@ -106,6 +106,18 @@ zilliz::milvus::engine::Options MySQLDBTest::GetOptions() {
return options;
}

void NewMemManagerTest::InitLog() {
el::Configurations defaultConf;
defaultConf.setToDefault();
defaultConf.set(el::Level::Debug,
el::ConfigurationType::Format, "[%thread-%datetime-%level]: %msg (%fbase:%line)");
el::Loggers::reconfigureLogger("default", defaultConf);
}

void NewMemManagerTest::SetUp() {
InitLog();
}

int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
if (argc > 1) {

@ -30,7 +30,7 @@
#define STOP_TIMER(name)
#endif

void ASSERT_STATS(zilliz::milvus::engine::Status& stat);
void ASSERT_STATS(zilliz::milvus::engine::Status &stat);

//class TestEnv : public ::testing::Environment {
//public:
@ -54,8 +54,8 @@ void ASSERT_STATS(zilliz::milvus::engine::Status& stat);
// ::testing::AddGlobalTestEnvironment(new TestEnv);

class DBTest : public ::testing::Test {
protected:
zilliz::milvus::engine::DB* db_;
protected:
zilliz::milvus::engine::DB *db_;

void InitLog();
virtual void SetUp() override;
@ -64,13 +64,13 @@ protected:
};

class DBTest2 : public DBTest {
protected:
protected:
virtual zilliz::milvus::engine::Options GetOptions() override;
};


class MetaTest : public DBTest {
protected:
protected:
std::shared_ptr<zilliz::milvus::engine::meta::DBMetaImpl> impl_;

virtual void SetUp() override;
@ -78,12 +78,17 @@ protected:
};

class MySQLTest : public ::testing::Test {
protected:
protected:
// std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;
zilliz::milvus::engine::DBMetaOptions getDBMetaOptions();
};

class MySQLDBTest : public ::testing::Test {
protected:
class MySQLDBTest : public ::testing::Test {
protected:
zilliz::milvus::engine::Options GetOptions();
};

class NewMemManagerTest : public ::testing::Test {
void InitLog();
void SetUp() override;
};

@ -22,13 +22,13 @@ add_executable(wrapper_test ${wrapper_test_src})

set(wrapper_libs
stdc++
boost_system
boost_filesystem
boost_system_static
boost_filesystem_static
libgpufaiss.a
faiss
cudart
cublas
sqlite3
sqlite
snappy
bz2
z

@ -4,12 +4,15 @@
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////

#include <gtest/gtest.h>


#include "wrapper/Operand.h"
#include "wrapper/Index.h"
#include "wrapper/IndexBuilder.h"

#include <gtest/gtest.h>
#include <random>

using namespace zilliz::milvus::engine;


@ -33,11 +33,11 @@ set(db_libs
nvidia-ml
cudart
cublas
boost_system
boost_filesystem
boost_system_static
boost_filesystem_static
lz4
crypto
boost_serialization
boost_serialization_static
)

target_link_libraries(license_test ${db_libs} ${unittest_libs})

@ -66,9 +66,9 @@ target_link_libraries(metrics_test
faiss
cudart
cublas
sqlite3
boost_system
boost_filesystem
sqlite
boost_system_static
boost_filesystem_static
lz4
metrics
gtest

@ -37,9 +37,9 @@ set(require_libs
faiss
cudart
cublas
sqlite3
boost_system
boost_filesystem
sqlite
boost_system_static
boost_filesystem_static
snappy
z
bz2

Some files were not shown because too many files have changed in this diff.