Merge remote-tracking branch 'main/0.6.0' into 0.6.0

This commit is contained in:
yhz 2019-11-16 10:23:14 +08:00
commit f22cbbe805
67 changed files with 1054 additions and 466 deletions

View File

@ -4,13 +4,18 @@ Please mark all change in change log and use the ticket from JIRA.
# Milvus 0.6.0 (TODO)
## Bug
- \#228 - Memory usage increases slowly while searching vectors
- \#246 - Exclude src/external folder from code coverage for Jenkins CI
- \#248 - Reside src/external in thirdparty
- \#316 - Some files not merged after vectors added
- \#327 - Search does not use GPU when index type is FLAT
- \#340 - Test cases run failed on 0.6.0
## Feature
- \#12 - Pure CPU version for Milvus
- \#77 - Support table partition
- \#226 - Experimental shards middleware for Milvus
- \#127 - Support new Index type IVFPQ
## Improvement
- \#275 - Rename C++ SDK IndexType
@ -18,6 +23,8 @@ Please mark all change in change log and use the ticket from JIRA.
- \#260 - C++ SDK README
- \#314 - add Find FAISS in CMake
- \#310 - Add Q&A for 'protocol https not supported or disable in libcurl' issue
- \#322 - Add option to enable / disable prometheus
- \#358 - Add more information in build.sh and install.md
## Task

335
ci/jenkins/Jenkinsfile vendored
View File

@ -33,128 +33,267 @@ pipeline {
}
stages {
stage("Ubuntu 18.04") {
stage("Ubuntu 18.04 x86_64") {
environment {
OS_NAME = "ubuntu18.04"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-ubuntu18.04-x86_64-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-${OS_NAME}-${LOWER_BUILD_TYPE}"
CPU_ARCH = "amd64"
}
stages {
stage("Run Build") {
agent {
kubernetes {
label 'build'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-build-env-pod.yaml'
}
parallel {
stage ("GPU Version") {
environment {
BINRARY_VERSION = "gpu"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-gpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-gpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
}
stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
stage("Run Build") {
agent {
kubernetes {
label "${BINRARY_VERSION}-build"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-gpu-version-build-env-pod.yaml'
}
}
}
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
}
}
}
}
}
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
}
}
}
}
}
}
}
}
stage("Publish docker images") {
agent {
kubernetes {
label 'publish'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}
stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}
stage("Deploy to Development") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}
stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}
stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
}
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
stage("Publish docker images") {
agent {
kubernetes {
label "${BINRARY_VERSION}-publish"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}
stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}
stage("Deploy to Development") {
agent {
kubernetes {
label "${BINRARY_VERSION}-dev-test"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}
stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}
stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
stage ("CPU Version") {
environment {
BINRARY_VERSION = "cpu"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-cpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-cpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
}
stages {
stage("Run Build") {
agent {
kubernetes {
label "${BINRARY_VERSION}-build"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-cpu-version-build-env-pod.yaml'
}
}
stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
}
}
}
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
}
}
}
}
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
}
}
}
}
}
}
stage("Publish docker images") {
agent {
kubernetes {
label "${BINRARY_VERSION}-publish"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}
stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}
stage("Deploy to Development") {
agent {
kubernetes {
label "${BINRARY_VERSION}-dev-test"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}
stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}
stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}

View File

@ -0,0 +1,34 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-cpu-build-env
labels:
app: milvus
componet: cpu-build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.6.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- cat
tty: true
resources:
limits:
memory: "32Gi"
cpu: "8.0"
requests:
memory: "16Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: 123456
ports:
- containerPort: 3306
name: mysql

View File

@ -1,14 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-build-env
name: milvus-gpu-build-env
labels:
app: milvus
componet: build-env
componet: gpu-build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-build-env:v0.5.1-ubuntu18.04
image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.6.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:

View File

@ -1,8 +1,11 @@
timeout(time: 60, unit: 'MINUTES') {
dir ("ci/scripts") {
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -g -j -u -c"
if ("${env.BINRARY_VERSION}" == "gpu") {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -g -j -u -c"
} else {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -m -j -u -c"
}
}
}
}

View File

@ -1,12 +1,12 @@
try {
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}", returnStatus: true
if (!helmResult) {
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}"
}
} catch (exc) {
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}", returnStatus: true
if (!helmResult) {
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}"
}
throw exc
}

View File

@ -1,9 +1,13 @@
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
dir ('milvus-helm') {
checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
dir ("milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/sqlite_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
dir ("milvus") {
if ("${env.BINRARY_VERSION}" == "gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f gpu_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
} else {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f ci/filebeat/values.yaml --namespace milvus ."
}
}
}

View File

@ -1,6 +1,6 @@
container('publish-images') {
timeout(time: 15, unit: 'MINUTES') {
dir ("docker/deploy/${OS_NAME}") {
dir ("docker/deploy/${env.BINRARY_VERSION}/${env.OS_NAME}") {
def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {

View File

@ -1,22 +1,26 @@
timeout(time: 90, unit: 'MINUTES') {
dir ("tests/milvus_python_test") {
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-milvus-engine.milvus.svc.cluster.local"
}
// mysql database backend test
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
}
}
dir ("milvus-helm") {
dir ("milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
dir ("milvus") {
if ("${env.BINRARY_VERSION}" == "gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f gpu_values.yaml -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
} else {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
}
}
}
dir ("tests/milvus_python_test") {
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-milvus-engine.milvus.svc.cluster.local"
}
}

View File

@ -1,24 +1,27 @@
timeout(time: 60, unit: 'MINUTES') {
dir ("tests/milvus_python_test") {
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-milvus-engine.milvus.svc.cluster.local"
}
// mysql database backend test
// load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
// Remove mysql-version tests: 10-28
// if (!fileExists('milvus-helm')) {
// dir ("milvus-helm") {
// checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
// checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
// }
// }
// dir ("milvus-helm") {
// dir ("milvus-gpu") {
// sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
// dir ("milvus") {
// if ("${env.BINRARY_VERSION}" == "gpu") {
// sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f gpu_values.yaml -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
// } else {
// sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
// }
// }
// }
// dir ("tests/milvus_python_test") {
// sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
// sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-milvus-engine.milvus.svc.cluster.local"
// }
}

View File

@ -33,7 +33,7 @@ message(STATUS "Build time = ${BUILD_TIME}")
MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
execute_process(COMMAND sh "-c" "git log --decorate | head -n 1 | sed 's/.*(\\(.*\\))/\\1/' | sed 's/.* \\(.*\\),.*/\\1/' | sed 's=[a-zA-Z]*\/==g'"
OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
ENDMACRO(GET_GIT_BRANCH_NAME)
GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
@ -117,17 +117,17 @@ include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)
if(MILVUS_USE_CCACHE)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
message(STATUS "Using ccache: ${CCACHE_FOUND}")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
# let ccache preserve C++ comments, because some of them may be
# meaningful to the compiler
set(ENV{CCACHE_COMMENTS} "1")
endif(CCACHE_FOUND)
endif()
if (MILVUS_USE_CCACHE)
find_program(CCACHE_FOUND ccache)
if (CCACHE_FOUND)
message(STATUS "Using ccache: ${CCACHE_FOUND}")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
# let ccache preserve C++ comments, because some of them may be
# meaningful to the compiler
set(ENV{CCACHE_COMMENTS} "1")
endif (CCACHE_FOUND)
endif ()
set(MILVUS_CPU_VERSION false)
if (MILVUS_GPU_VERSION)
@ -142,6 +142,10 @@ else ()
add_compile_definitions("MILVUS_CPU_VERSION")
endif ()
if (MILVUS_WITH_PROMETHEUS)
add_compile_definitions("MILVUS_WITH_PROMETHEUS")
endif ()
if (CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
if (MILVUS_GPU_VERSION)
@ -176,9 +180,9 @@ endif ()
if (MILVUS_GPU_VERSION)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_gpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
else()
else ()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_cpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
endif()
endif ()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf)

View File

@ -14,64 +14,69 @@ CUSTOMIZATION="OFF" # default use ori faiss
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
GPU_VERSION="OFF" #defaults to CPU version
WITH_MKL="OFF"
FAISS_ROOT=""
FAISS_ROOT="" #FAISS root path
FAISS_SOURCE="BUNDLED"
WITH_PROMETHEUS="ON"
while getopts "p:d:t:f:ulrcgjhxzm" arg
do
case $arg in
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
f)
FAISS_ROOT=$OPTARG
FAISS_SOURCE="AUTO"
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="ON";
;;
l)
RUN_CPPLINT="ON"
;;
r)
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
rm ./${BUILD_OUTPUT_DIR} -r
MAKE_CLEAN="ON"
fi
;;
c)
BUILD_COVERAGE="ON"
;;
z)
PROFILING="ON"
;;
j)
USE_JFROG_CACHE="ON"
;;
x)
CUSTOMIZATION="OFF" # force use ori faiss
;;
g)
GPU_VERSION="ON"
;;
m)
WITH_MKL="ON"
;;
h) # help
echo "
while getopts "p:d:t:f:ulrcgjhxzme" arg; do
case $arg in
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
f)
FAISS_ROOT=$OPTARG
FAISS_SOURCE="AUTO"
;;
u)
echo "Build and run unittest cases"
BUILD_UNITTEST="ON"
;;
l)
RUN_CPPLINT="ON"
;;
r)
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
rm ./${BUILD_OUTPUT_DIR} -r
MAKE_CLEAN="ON"
fi
;;
c)
BUILD_COVERAGE="ON"
;;
z)
PROFILING="ON"
;;
j)
USE_JFROG_CACHE="ON"
;;
x)
CUSTOMIZATION="OFF" # force use ori faiss
;;
g)
GPU_VERSION="ON"
;;
m)
WITH_MKL="ON"
;;
e)
WITH_PROMETHEUS="OFF"
;;
h) # help
echo "
parameter:
-p: install prefix(default: $(pwd)/milvus)
-d: db data path(default: /tmp/milvus)
-t: build type(default: Debug)
-f: faiss root path(default: empty)
-f: FAISS root path(default: empty). The path should be an absolute path
containing the pre-installed lib/ and include/ directory of FAISS. If they can't be found,
we will build the original FAISS from source instead.
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
@ -80,29 +85,30 @@ parameter:
-j: use jfrog cache build directory(default: OFF)
-g: build GPU version(default: OFF)
-m: build with MKL(default: OFF)
-e: build without prometheus(default: OFF)
-h: help
usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} -f \${FAISS_ROOT} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-h]
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} -f \${FAISS_ROOT} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-e] [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
mkdir ${BUILD_OUTPUT_DIR}
mkdir ${BUILD_OUTPUT_DIR}
fi
cd ${BUILD_OUTPUT_DIR}
# remove make cache since build.sh -l use default variables
# force update the variables each time
make rebuild_cache > /dev/null 2>&1
make rebuild_cache >/dev/null 2>&1
CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
@ -118,30 +124,31 @@ CMAKE_CMD="cmake \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DFAISS_WITH_MKL=${WITH_MKL} \
-DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
../"
echo ${CMAKE_CMD}
${CMAKE_CMD}
if [[ ${MAKE_CLEAN} == "ON" ]]; then
make clean
make clean
fi
if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# # clang-tidy check
# make check-clang-tidy
@ -152,11 +159,11 @@ if [[ ${RUN_CPPLINT} == "ON" ]]; then
# echo "clang-tidy check passed!"
else
# strip binary symbol
if [[ ${BUILD_TYPE} != "Debug" ]]; then
strip src/milvus_server
fi
# strip binary symbol
if [[ ${BUILD_TYPE} != "Debug" ]]; then
strip src/milvus_server
fi
# compile and build
make -j 8 install || exit 1
# compile and build
make -j 8 install || exit 1
fi

View File

@ -37,6 +37,7 @@ endforeach ()
aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics metrics_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics/prometheus metrics_prometheus_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db db_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/engine db_engine_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/insert db_insert_files)
@ -91,6 +92,11 @@ set(engine_files
${wrapper_files}
)
if (MILVUS_WITH_PROMETHEUS)
set(engine_files ${engine_files}
${metrics_prometheus_files})
endif ()
set(client_grpc_lib
grpcpp_channelz
grpc++
@ -115,7 +121,6 @@ set(third_party_libs
sqlite
${client_grpc_lib}
yaml-cpp
${prometheus_lib}
mysqlpp
zlib
${boost_lib}
@ -138,13 +143,19 @@ if (MILVUS_GPU_VERSION)
)
endif ()
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
if (MILVUS_ENABLE_PROFILING)
set(third_party_libs ${third_party_libs}
gperftools
libunwind
)
endif ()
if (MILVUS_WITH_PROMETHEUS)
set(third_party_libs ${third_party_libs}
${prometheus_lib}
)
endif ()
set(engine_libs
pthread
libgomp.a
@ -166,13 +177,22 @@ target_link_libraries(milvus_engine
${engine_libs}
)
add_library(metrics STATIC ${metrics_files})
if (MILVUS_WITH_PROMETHEUS)
add_library(metrics STATIC ${metrics_files} ${metrics_prometheus_files})
else ()
add_library(metrics STATIC ${metrics_files})
endif ()
set(metrics_lib
yaml-cpp
${prometheus_lib}
)
if (MILVUS_WITH_PROMETHEUS)
set(metrics_lib ${metrics_lib}
${prometheus_lib}
)
endif ()
target_link_libraries(metrics ${metrics_lib})
set(server_libs

View File

@ -179,9 +179,10 @@ DBImpl::PreloadTable(const std::string& table_id) {
}
// get all table files from parent table
meta::DatesT dates;
std::vector<size_t> ids;
meta::TableFilesSchema files_array;
auto status = GetFilesToSearch(table_id, ids, files_array);
auto status = GetFilesToSearch(table_id, ids, dates, files_array);
if (!status.ok()) {
return status;
}
@ -190,7 +191,7 @@ DBImpl::PreloadTable(const std::string& table_id) {
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = GetFilesToSearch(schema.table_id_, ids, files_array);
status = GetFilesToSearch(schema.table_id_, ids, dates, files_array);
}
int64_t size = 0;
@ -304,6 +305,10 @@ DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_
if (!partition_tag.empty()) {
std::string partition_name;
status = meta_ptr_->GetPartitionName(table_id, partition_tag, target_table_name);
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
return status;
}
}
// insert vectors into target table
@ -400,7 +405,7 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& parti
if (partition_tags.empty()) {
// no partition tag specified, means search in whole table
// get all table files from parent table
status = GetFilesToSearch(table_id, ids, files_array);
status = GetFilesToSearch(table_id, ids, dates, files_array);
if (!status.ok()) {
return status;
}
@ -408,7 +413,7 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& parti
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = GetFilesToSearch(schema.table_id_, ids, files_array);
status = GetFilesToSearch(schema.table_id_, ids, dates, files_array);
}
} else {
// get files from specified partitions
@ -416,7 +421,7 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& parti
GetPartitionsByTags(table_id, partition_tags, partition_name_array);
for (auto& partition_name : partition_name_array) {
status = GetFilesToSearch(partition_name, ids, files_array);
status = GetFilesToSearch(partition_name, ids, dates, files_array);
}
}
@ -446,7 +451,7 @@ DBImpl::QueryByFileID(const std::string& table_id, const std::vector<std::string
}
meta::TableFilesSchema files_array;
auto status = GetFilesToSearch(table_id, ids, files_array);
auto status = GetFilesToSearch(table_id, ids, dates, files_array);
if (!status.ok()) {
return status;
}
@ -619,6 +624,18 @@ DBImpl::StartCompactionTask() {
{
std::lock_guard<std::mutex> lck(compact_result_mutex_);
if (compact_thread_results_.empty()) {
// collect merge files for all tables(if compact_table_ids_ is empty) for two reasons:
// 1. other tables may still has un-merged files
// 2. server may be closed unexpected, these un-merge files need to be merged when server restart
if (compact_table_ids_.empty()) {
std::vector<meta::TableSchema> table_schema_array;
meta_ptr_->AllTables(table_schema_array);
for (auto& schema : table_schema_array) {
compact_table_ids_.insert(schema.table_id_);
}
}
// start merge file thread
compact_thread_results_.push_back(
compact_thread_pool_.enqueue(&DBImpl::BackgroundCompaction, this, compact_table_ids_));
compact_table_ids_.clear();
@ -717,7 +734,7 @@ DBImpl::BackgroundMergeFiles(const std::string& table_id) {
for (auto& kv : raw_files) {
auto files = kv.second;
if (files.size() < options_.merge_trigger_number_) {
ENGINE_LOG_DEBUG << "Files number not greater equal than merge trigger number, skip merge action";
ENGINE_LOG_TRACE << "Files number not greater equal than merge trigger number, skip merge action";
continue;
}
@ -734,7 +751,7 @@ DBImpl::BackgroundMergeFiles(const std::string& table_id) {
void
DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
ENGINE_LOG_TRACE << " Background compaction thread start";
ENGINE_LOG_TRACE << "Background compaction thread start";
Status status;
for (auto& table_id : table_ids) {
@ -757,7 +774,7 @@ DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
}
meta_ptr_->CleanUpFilesWithTTL(ttl);
ENGINE_LOG_TRACE << " Background compaction thread exit";
ENGINE_LOG_TRACE << "Background compaction thread exit";
}
void
@ -817,9 +834,8 @@ DBImpl::BackgroundBuildIndex() {
}
Status
DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids,
DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, const meta::DatesT& dates,
meta::TableFilesSchema& files) {
meta::DatesT dates;
meta::DatePartionedTableFilesSchema date_files;
auto status = meta_ptr_->FilesToSearch(table_id, file_ids, dates, date_files);
if (!status.ok()) {

View File

@ -153,7 +153,8 @@ class DBImpl : public DB {
MemSerialize();
Status
GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, meta::TableFilesSchema& files);
GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, const meta::DatesT& dates,
meta::TableFilesSchema& files);
Status
GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,

View File

@ -26,6 +26,7 @@
namespace milvus {
namespace engine {
// TODO(linxj): replace with VecIndex::IndexType
enum class EngineType {
INVALID = 0,
FAISS_IDMAP = 1,
@ -33,7 +34,8 @@ enum class EngineType {
FAISS_IVFSQ8,
NSG_MIX,
FAISS_IVFSQ8H,
MAX_VALUE = FAISS_IVFSQ8H,
FAISS_PQ,
MAX_VALUE = FAISS_PQ,
};
enum class MetricType {

View File

@ -116,6 +116,14 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_HYBRID);
break;
}
case EngineType::FAISS_PQ: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_CPU);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_MIX);
#endif
break;
}
default: {
ENGINE_LOG_ERROR << "Unsupported index type";
return nullptr;

View File

@ -1392,6 +1392,7 @@ MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFiles
} // Scoped Connection
Status ret;
int64_t to_merge_files = 0;
for (auto& resRow : res) {
TableFileSchema table_file;
table_file.file_size_ = resRow["file_size"];
@ -1420,13 +1421,14 @@ MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFiles
auto dateItr = files.find(table_file.date_);
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
to_merge_files++;
}
files[table_file.date_].push_back(table_file);
}
if (res.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-merge files";
if (to_merge_files > 0) {
ENGINE_LOG_TRACE << "Collect " << to_merge_files << " to-merge files";
}
return ret;
} catch (std::exception& e) {
@ -1809,6 +1811,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store();
int64_t remove_tables = 0;
if (!res.empty()) {
std::stringstream idsToDeleteSS;
for (auto& resRow : res) {
@ -1817,7 +1820,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
resRow["table_id"].to_string(table_id);
utils::DeleteTablePath(options_, table_id, false); // only delete empty folder
remove_tables++;
idsToDeleteSS << "id = " << std::to_string(id) << " OR ";
}
std::string idsToDeleteStr = idsToDeleteSS.str();
@ -1832,8 +1835,8 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
}
}
if (res.size() > 0) {
ENGINE_LOG_DEBUG << "Remove " << res.size() << " tables from meta";
if (remove_tables > 0) {
ENGINE_LOG_DEBUG << "Remove " << remove_tables << " tables from meta";
}
} // Scoped Connection
} catch (std::exception& e) {

View File

@ -971,6 +971,7 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFile
order_by(&TableFileSchema::file_size_).desc());
Status result;
int64_t to_merge_files = 0;
for (auto& file : selected) {
TableFileSchema table_file;
table_file.file_size_ = std::get<4>(file);
@ -999,11 +1000,13 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFile
if (dateItr == files.end()) {
files[table_file.date_] = TableFilesSchema();
}
files[table_file.date_].push_back(table_file);
to_merge_files++;
}
if (selected.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-merge files";
if (to_merge_files > 0) {
ENGINE_LOG_TRACE << "Collect " << to_merge_files << " to-merge files";
}
return result;
} catch (std::exception& e) {
@ -1313,16 +1316,18 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
try {
server::MetricCollector metric;
int64_t remove_tables = 0;
for (auto& table_id : table_ids) {
auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_),
where(c(&TableFileSchema::table_id_) == table_id));
if (selected.size() == 0) {
utils::DeleteTablePath(options_, table_id);
remove_tables++;
}
}
if (table_ids.size() > 0) {
ENGINE_LOG_DEBUG << "Remove " << table_ids.size() << " tables folder";
if (remove_tables) {
ENGINE_LOG_DEBUG << "Remove " << remove_tables << " tables folder";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when delete table folder", e.what());

View File

@ -89,34 +89,35 @@ ConvertToDataset(std::vector<SPTAG::QueryResult> query_results) {
}
}
auto id_buf = MakeMutableBufferSmart((uint8_t*)p_id, sizeof(int64_t) * elems);
auto dist_buf = MakeMutableBufferSmart((uint8_t*)p_dist, sizeof(float) * elems);
// TODO: magic
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
auto int64_type = std::make_shared<arrow::Int64Type>();
auto float_type = std::make_shared<arrow::FloatType>();
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
// auto id_array_data = std::make_shared<ArrayData>(int64_type, sizeof(int64_t) * elems, id_bufs);
// auto dist_array_data = std::make_shared<ArrayData>(float_type, sizeof(float) * elems, dist_bufs);
// auto ids = ConstructInt64Array((uint8_t*)p_id, sizeof(int64_t) * elems);
// auto dists = ConstructFloatArray((uint8_t*)p_dist, sizeof(float) * elems);
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
std::vector<ArrayPtr> array{ids, dists};
auto field_id = std::make_shared<Field>("id", std::make_shared<arrow::Int64Type>());
auto field_dist = std::make_shared<Field>("dist", std::make_shared<arrow::FloatType>());
std::vector<FieldPtr> fields{field_id, field_dist};
auto schema = std::make_shared<Schema>(fields);
return std::make_shared<Dataset>(array, schema);
// auto id_buf = MakeMutableBufferSmart((uint8_t*)p_id, sizeof(int64_t) * elems);
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)p_dist, sizeof(float) * elems);
//
// // TODO: magic
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
//
// auto int64_type = std::make_shared<arrow::Int64Type>();
// auto float_type = std::make_shared<arrow::FloatType>();
//
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
// // auto id_array_data = std::make_shared<ArrayData>(int64_type, sizeof(int64_t) * elems, id_bufs);
// // auto dist_array_data = std::make_shared<ArrayData>(float_type, sizeof(float) * elems, dist_bufs);
//
// // auto ids = ConstructInt64Array((uint8_t*)p_id, sizeof(int64_t) * elems);
// // auto dists = ConstructFloatArray((uint8_t*)p_dist, sizeof(float) * elems);
//
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
// std::vector<ArrayPtr> array{ids, dists};
//
// auto field_id = std::make_shared<Field>("id", std::make_shared<arrow::Int64Type>());
// auto field_dist = std::make_shared<Field>("dist", std::make_shared<arrow::FloatType>());
// std::vector<FieldPtr> fields{field_id, field_dist};
// auto schema = std::make_shared<Schema>(fields);
//
// return std::make_shared<Dataset>(array, schema);
return std::make_shared<Dataset>((void*)p_id, (void*)p_dist);
}
} // namespace knowhere

View File

@ -54,6 +54,9 @@ class Dataset {
: tensor_(std::move(tensor)), tensor_schema_(std::move(tensor_schema)) {
}
Dataset(void* ids, void* dists) : ids_(ids), dists_(dists) {
}
Dataset(const Dataset&) = delete;
Dataset&
operator=(const Dataset&) = delete;
@ -128,6 +131,16 @@ class Dataset {
tensor_schema_ = std::move(tensor_schema);
}
void*
ids() {
return ids_;
}
void*
dist() {
return dists_;
}
// const Config &
// meta() const { return meta_; }
@ -141,6 +154,9 @@ class Dataset {
SchemaPtr array_schema_;
std::vector<TensorPtr> tensor_;
SchemaPtr tensor_schema_;
// TODO(yukun): using smart pointer
void* ids_;
void* dists_;
// Config meta_;
};

View File

@ -39,17 +39,19 @@ GPUIVFPQ::Train(const DatasetPtr& dataset, const Config& config) {
GETTENSOR(dataset)
// TODO(linxj): set device here.
// TODO(linxj): set gpu resource here.
faiss::gpu::StandardGpuResources res;
faiss::gpu::GpuIndexIVFPQ device_index(&res, dim, build_cfg->nlist, build_cfg->m, build_cfg->nbits,
GetMetricType(build_cfg->metric_type)); // IP not support
device_index.train(rows, (float*)p_data);
std::shared_ptr<faiss::Index> host_index = nullptr;
host_index.reset(faiss::gpu::index_gpu_to_cpu(&device_index));
return std::make_shared<IVFIndexModel>(host_index);
auto temp_resource = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_);
if (temp_resource != nullptr) {
ResScope rs(temp_resource, gpu_id_, true);
auto device_index = new faiss::gpu::GpuIndexIVFPQ(temp_resource->faiss_res.get(), dim, build_cfg->nlist,
build_cfg->m, build_cfg->nbits,
GetMetricType(build_cfg->metric_type)); // IP not support
device_index->train(rows, (float*)p_data);
std::shared_ptr<faiss::Index> host_index = nullptr;
host_index.reset(faiss::gpu::index_gpu_to_cpu(device_index));
return std::make_shared<IVFIndexModel>(host_index);
} else {
KNOWHERE_THROW_MSG("Build IVFSQ can't get gpu resource");
}
}
std::shared_ptr<faiss::IVFSearchParameters>
@ -66,7 +68,14 @@ GPUIVFPQ::GenParams(const Config& config) {
VectorIndexPtr
GPUIVFPQ::CopyGpuToCpu(const Config& config) {
KNOWHERE_THROW_MSG("not support yet");
std::lock_guard<std::mutex> lk(mutex_);
faiss::Index* device_index = index_.get();
faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(device_index);
std::shared_ptr<faiss::Index> new_index;
new_index.reset(host_index);
return std::make_shared<IVFPQ>(new_index);
}
} // namespace knowhere

View File

@ -18,6 +18,7 @@
#pragma once
#include <memory>
#include <utility>
#include "IndexGPUIVF.h"
@ -28,6 +29,10 @@ class GPUIVFPQ : public GPUIVF {
explicit GPUIVFPQ(const int& device_id) : GPUIVF(device_id) {
}
GPUIVFPQ(std::shared_ptr<faiss::Index> index, const int64_t& device_id, ResPtr& resource)
: GPUIVF(std::move(index), device_id, resource) {
}
IndexModelPtr
Train(const DatasetPtr& dataset, const Config& config) override;

View File

@ -80,23 +80,24 @@ IDMAP::Search(const DatasetPtr& dataset, const Config& config) {
search_impl(rows, (float*)p_data, config->k, res_dis, res_ids, Config());
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
auto int64_type = std::make_shared<arrow::Int64Type>();
auto float_type = std::make_shared<arrow::FloatType>();
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
std::vector<ArrayPtr> array{ids, dists};
return std::make_shared<Dataset>(array, nullptr);
// auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
//
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
//
// auto int64_type = std::make_shared<arrow::Int64Type>();
// auto float_type = std::make_shared<arrow::FloatType>();
//
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
//
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
// std::vector<ArrayPtr> array{ids, dists};
//
// return std::make_shared<Dataset>(array, nullptr);
return std::make_shared<Dataset>((void*)res_ids, (void*)res_dis);
}
void

View File

@ -139,23 +139,23 @@ IVF::Search(const DatasetPtr& dataset, const Config& config) {
// std::cout << ss_res_id.str() << std::endl;
// std::cout << ss_res_dist.str() << std::endl << std::endl;
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
// auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
//
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
//
// auto int64_type = std::make_shared<arrow::Int64Type>();
// auto float_type = std::make_shared<arrow::FloatType>();
//
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
//
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
// std::vector<ArrayPtr> array{ids, dists};
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
auto int64_type = std::make_shared<arrow::Int64Type>();
auto float_type = std::make_shared<arrow::FloatType>();
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
std::vector<ArrayPtr> array{ids, dists};
return std::make_shared<Dataset>(array, nullptr);
return std::make_shared<Dataset>((void*)res_ids, (void*)res_dis);
}
void

View File

@ -17,11 +17,19 @@
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFPQ.h>
#ifdef MILVUS_GPU_VERSION
#include <faiss/gpu/GpuCloner.h>
#endif
#include <memory>
#include <utility>
#include "knowhere/adapter/VectorAdapter.h"
#include "knowhere/common/Exception.h"
#ifdef MILVUS_GPU_VERSION
#include "knowhere/index/vector_index/IndexGPUIVF.h"
#include "knowhere/index/vector_index/IndexGPUIVFPQ.h"
#endif
#include "knowhere/index/vector_index/IndexIVFPQ.h"
namespace knowhere {
@ -60,4 +68,22 @@ IVFPQ::Clone_impl(const std::shared_ptr<faiss::Index>& index) {
return std::make_shared<IVFPQ>(index);
}
VectorIndexPtr
IVFPQ::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
#ifdef MILVUS_GPU_VERSION
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) {
ResScope rs(res, device_id, false);
auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get());
std::shared_ptr<faiss::Index> device_index;
device_index.reset(gpu_index);
return std::make_shared<GPUIVFPQ>(device_index, device_id, res);
} else {
KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource");
}
#else
KNOWHERE_THROW_MSG("Calling IVFPQ::CopyCpuToGpu when we are using CPU version");
#endif
}
} // namespace knowhere

View File

@ -34,6 +34,9 @@ class IVFPQ : public IVF {
IndexModelPtr
Train(const DatasetPtr& dataset, const Config& config) override;
VectorIndexPtr
CopyCpuToGpu(const int64_t& device_id, const Config& config) override;
protected:
std::shared_ptr<faiss::IVFSearchParameters>
GenParams(const Config& config) override;

View File

@ -88,23 +88,24 @@ NSG::Search(const DatasetPtr& dataset, const Config& config) {
s_params.search_length = build_cfg->search_length;
index_->Search((float*)p_data, rows, dim, build_cfg->k, res_dis, res_ids, s_params);
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
// auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
auto int64_type = std::make_shared<arrow::Int64Type>();
auto float_type = std::make_shared<arrow::FloatType>();
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
std::vector<ArrayPtr> array{ids, dists};
return std::make_shared<Dataset>(array, nullptr);
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
//
// auto int64_type = std::make_shared<arrow::Int64Type>();
// auto float_type = std::make_shared<arrow::FloatType>();
//
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
//
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
// std::vector<ArrayPtr> array{ids, dists};
//
// return std::make_shared<Dataset>(array, nullptr);
return std::make_shared<Dataset>((void*)res_ids, (void*)res_dis);
}
IndexModelPtr

View File

@ -49,7 +49,7 @@ CopyCpuToGpu(const VectorIndexPtr& index, const int64_t& device_id, const Config
if (auto cpu_index = std::dynamic_pointer_cast<IVFSQ>(index)) {
return cpu_index->CopyCpuToGpu(device_id, config);
} else if (auto cpu_index = std::dynamic_pointer_cast<IVFPQ>(index)) {
KNOWHERE_THROW_MSG("IVFPQ not support transfer to gpu");
return cpu_index->CopyCpuToGpu(device_id, config);
} else if (auto cpu_index = std::dynamic_pointer_cast<IVF>(index)) {
return cpu_index->CopyCpuToGpu(device_id, config);
} else if (auto cpu_index = std::dynamic_pointer_cast<IDMAP>(index)) {

View File

@ -181,11 +181,13 @@ TEST_P(IVFTest, clone_test) {
// PrintResult(result, nq, k);
auto AssertEqual = [&](knowhere::DatasetPtr p1, knowhere::DatasetPtr p2) {
auto ids_p1 = p1->array()[0];
auto ids_p2 = p2->array()[0];
auto ids_p1 = p1->ids();
auto ids_p2 = p2->ids();
for (int i = 0; i < nq * k; ++i) {
EXPECT_EQ(*(ids_p2->data()->GetValues<int64_t>(1, i)), *(ids_p1->data()->GetValues<int64_t>(1, i)));
EXPECT_EQ(*((int64_t*)(ids_p2) + i), *((int64_t*)(ids_p1) + i));
// EXPECT_EQ(*(ids_p2->data()->GetValues<int64_t>(1, i)), *(ids_p1->data()->GetValues<int64_t>(1,
// i)));
}
};
@ -211,7 +213,7 @@ TEST_P(IVFTest, clone_test) {
{
// copy from gpu to cpu
std::vector<std::string> support_idx_vec{"GPUIVF", "GPUIVFSQ", "IVFSQHybrid"};
std::vector<std::string> support_idx_vec{"GPUIVF", "GPUIVFSQ", "GPUIVFPQ", "IVFSQHybrid"};
auto finder = std::find(support_idx_vec.cbegin(), support_idx_vec.cend(), index_type);
if (finder != support_idx_vec.cend()) {
EXPECT_NO_THROW({
@ -236,7 +238,7 @@ TEST_P(IVFTest, clone_test) {
{
// copy to gpu
std::vector<std::string> support_idx_vec{"IVF", "GPUIVF", "IVFSQ", "GPUIVFSQ"};
std::vector<std::string> support_idx_vec{"IVF", "GPUIVF", "IVFSQ", "GPUIVFSQ", "IVFPQ", "GPUIVFPQ"};
auto finder = std::find(support_idx_vec.cbegin(), support_idx_vec.cend(), index_type);
if (finder != support_idx_vec.cend()) {
EXPECT_NO_THROW({

View File

@ -66,15 +66,19 @@ TEST_F(KDTTest, kdt_basic) {
AssertAnns(result, nq, k);
{
auto ids = result->array()[0];
auto dists = result->array()[1];
// auto ids = result->array()[0];
// auto dists = result->array()[1];
auto ids = result->ids();
auto dists = result->dist();
std::stringstream ss_id;
std::stringstream ss_dist;
for (auto i = 0; i < nq; i++) {
for (auto j = 0; j < k; ++j) {
ss_id << *ids->data()->GetValues<int64_t>(1, i * k + j) << " ";
ss_dist << *dists->data()->GetValues<float>(1, i * k + j) << " ";
ss_id << *((int64_t*)(ids) + i * k + j) << " ";
ss_dist << *((float*)(dists) + i * k + j) << " ";
// ss_id << *ids->data()->GetValues<int64_t>(1, i * k + j) << " ";
// ss_dist << *dists->data()->GetValues<float>(1, i * k + j) << " ";
}
ss_id << std::endl;
ss_dist << std::endl;

View File

@ -151,9 +151,10 @@ generate_query_dataset(int64_t nb, int64_t dim, float* xb) {
void
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
auto ids = result->array()[0];
auto ids = result->ids();
for (auto i = 0; i < nq; i++) {
EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
EXPECT_EQ(i, *((int64_t*)(ids) + i * k));
// EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
}
}

View File

@ -16,8 +16,10 @@
// under the License.
#include "metrics/Metrics.h"
#include "PrometheusMetrics.h"
#include "server/Config.h"
#ifdef MILVUS_WITH_PROMETHEUS
#include "metrics/prometheus/PrometheusMetrics.h"
#endif
#include <string>
@ -37,11 +39,15 @@ Metrics::CreateMetricsCollector() {
config.GetMetricConfigCollector(collector_type_str);
#ifdef MILVUS_WITH_PROMETHEUS
if (collector_type_str == "prometheus") {
return PrometheusMetrics::GetInstance();
} else {
return MetricsBase::GetInstance();
}
#else
return MetricsBase::GetInstance();
#endif
}
} // namespace server

View File

@ -15,9 +15,9 @@
// specific language governing permissions and limitations
// under the License.
#include "metrics/PrometheusMetrics.h"
#include "SystemInfo.h"
#include "metrics/prometheus/PrometheusMetrics.h"
#include "cache/GpuCacheMgr.h"
#include "metrics/SystemInfo.h"
#include "server/Config.h"
#include "utils/Log.h"

View File

@ -24,7 +24,7 @@
#include <string>
#include <vector>
#include "MetricBase.h"
#include "metrics/MetricBase.h"
#include "utils/Error.h"
#define METRICS_NOW_TIME std::chrono::system_clock::now()

View File

@ -33,8 +33,7 @@ namespace milvus {
namespace interface {
struct dumpable {
virtual ~dumpable() {
}
virtual ~dumpable() = default;
virtual json
Dump() const = 0;

View File

@ -34,7 +34,8 @@ OnlyGPUPass::Run(const TaskPtr& task) {
auto search_task = std::static_pointer_cast<XSearchTask>(task);
if (search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFSQ8 &&
search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFFLAT) {
search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFFLAT &&
search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IDMAP) {
return false;
}

View File

@ -16,6 +16,7 @@
// under the License.
#include "wrapper/ConfAdapter.h"
#include "WrapperException.h"
#include "knowhere/index/vector_index/helpers/IndexParameter.h"
#include "utils/Log.h"
@ -76,7 +77,7 @@ IVFConfAdapter::MatchNlist(const int64_t& size, const int64_t& nlist) {
if (size <= TYPICAL_COUNT / 16384 + 1) {
// handle less row count, avoid nlist set to 0
return 1;
} else if (int(size / TYPICAL_COUNT) * nlist == 0) {
} else if (int(size / TYPICAL_COUNT) * nlist <= 0) {
// calculate a proper nlist if nlist not specified or size less than TYPICAL_COUNT
return int(size / TYPICAL_COUNT * 16384);
}
@ -87,7 +88,11 @@ knowhere::Config
IVFConfAdapter::MatchSearch(const TempMetaConf& metaconf, const IndexType& type) {
auto conf = std::make_shared<knowhere::IVFCfg>();
conf->k = metaconf.k;
conf->nprobe = metaconf.nprobe;
if (metaconf.nprobe <= 0)
conf->nprobe = 16; // hardcode here
else
conf->nprobe = metaconf.nprobe;
switch (type) {
case IndexType::FAISS_IVFFLAT_GPU:
@ -123,11 +128,47 @@ IVFPQConfAdapter::Match(const TempMetaConf& metaconf) {
conf->metric_type = metaconf.metric_type;
conf->gpu_id = conf->gpu_id;
conf->nbits = 8;
conf->m = 8;
if (!(conf->d % 4))
conf->m = conf->d / 4; // compression radio = 16
else if (!(conf->d % 2))
conf->m = conf->d / 2; // compression radio = 8
else if (!(conf->d % 3))
conf->m = conf->d / 3; // compression radio = 12
else
conf->m = conf->d; // same as SQ8, compression radio = 4
MatchBase(conf);
return conf;
}
knowhere::Config
IVFPQConfAdapter::MatchSearch(const TempMetaConf& metaconf, const IndexType& type) {
auto conf = std::make_shared<knowhere::IVFPQCfg>();
conf->k = metaconf.k;
if (metaconf.nprobe <= 0) {
WRAPPER_LOG_ERROR << "The nprobe of PQ is wrong!";
throw WrapperException("The nprobe of PQ is wrong!");
} else {
conf->nprobe = metaconf.nprobe;
}
return conf;
}
int64_t
IVFPQConfAdapter::MatchNlist(const int64_t& size, const int64_t& nlist) {
if (size <= TYPICAL_COUNT / 16384 + 1) {
// handle less row count, avoid nlist set to 0
return 1;
} else if (int(size / TYPICAL_COUNT) * nlist <= 0) {
// calculate a proper nlist if nlist not specified or size less than TYPICAL_COUNT
return int(size / TYPICAL_COUNT * 16384);
}
return nlist;
}
knowhere::Config
NSGConfAdapter::Match(const TempMetaConf& metaconf) {
auto conf = std::make_shared<knowhere::NSGCfg>();
@ -136,13 +177,14 @@ NSGConfAdapter::Match(const TempMetaConf& metaconf) {
conf->metric_type = metaconf.metric_type;
conf->gpu_id = conf->gpu_id;
double factor = metaconf.size / TYPICAL_COUNT;
auto scale_factor = round(metaconf.dim / 128.0);
scale_factor = scale_factor >= 4 ? 4 : scale_factor;
conf->nprobe = 6 + 10 * scale_factor;
conf->knng = 100 + 100 * scale_factor;
conf->search_length = 40 + 5 * scale_factor;
conf->out_degree = 50 + 5 * scale_factor;
conf->candidate_pool_size = 200 + 100 * scale_factor;
conf->nprobe = conf->nlist > 10000 ? conf->nlist * 0.02 : conf->nlist * 0.1;
conf->knng = (100 + 100 * scale_factor) * factor;
conf->search_length = (40 + 5 * scale_factor) * factor;
conf->out_degree = (50 + 5 * scale_factor) * factor;
conf->candidate_pool_size = (200 + 100 * scale_factor) * factor;
MatchBase(conf);
// WRAPPER_LOG_DEBUG << "nlist: " << conf->nlist
@ -156,6 +198,9 @@ NSGConfAdapter::MatchSearch(const TempMetaConf& metaconf, const IndexType& type)
auto conf = std::make_shared<knowhere::NSGCfg>();
conf->k = metaconf.k;
conf->search_length = metaconf.search_length;
if (metaconf.search_length == TEMPMETA_DEFAULT_VALUE) {
conf->search_length = 30; // TODO(linxj): hardcode here.
}
return conf;
}

View File

@ -79,6 +79,13 @@ class IVFPQConfAdapter : public IVFConfAdapter {
public:
knowhere::Config
Match(const TempMetaConf& metaconf) override;
knowhere::Config
MatchSearch(const TempMetaConf& metaconf, const IndexType& type) override;
protected:
static int64_t
MatchNlist(const int64_t& size, const int64_t& nlist);
};
class NSGConfAdapter : public IVFConfAdapter {

View File

@ -53,6 +53,7 @@ AdapterMgr::RegisterAdapter() {
REGISTER_CONF_ADAPTER(IVFPQConfAdapter, IndexType::FAISS_IVFPQ_CPU, ivfpq_cpu);
REGISTER_CONF_ADAPTER(IVFPQConfAdapter, IndexType::FAISS_IVFPQ_GPU, ivfpq_gpu);
REGISTER_CONF_ADAPTER(IVFPQConfAdapter, IndexType::FAISS_IVFPQ_MIX, ivfpq_mix);
REGISTER_CONF_ADAPTER(NSGConfAdapter, IndexType::NSG_MIX, nsg_mix);
}

View File

@ -84,8 +84,8 @@ VecIndexImpl::Search(const int64_t& nq, const float* xq, float* dist, int64_t* i
Config search_cfg = cfg;
auto res = index_->Search(dataset, search_cfg);
auto ids_array = res->array()[0];
auto dis_array = res->array()[1];
// auto ids_array = res->array()[0];
// auto dis_array = res->array()[1];
//{
// auto& ids = ids_array;
@ -104,12 +104,14 @@ VecIndexImpl::Search(const int64_t& nq, const float* xq, float* dist, int64_t* i
// std::cout << "dist\n" << ss_dist.str() << std::endl;
//}
auto p_ids = ids_array->data()->GetValues<int64_t>(1, 0);
auto p_dist = dis_array->data()->GetValues<float>(1, 0);
// auto p_ids = ids_array->data()->GetValues<int64_t>(1, 0);
// auto p_dist = dis_array->data()->GetValues<float>(1, 0);
// TODO(linxj): avoid copy here.
memcpy(ids, p_ids, sizeof(int64_t) * nq * k);
memcpy(dist, p_dist, sizeof(float) * nq * k);
memcpy(ids, res->ids(), sizeof(int64_t) * nq * k);
memcpy(dist, res->dist(), sizeof(float) * nq * k);
free(res->ids());
free(res->dist());
} catch (knowhere::KnowhereException& e) {
WRAPPER_LOG_ERROR << e.what();
return Status(KNOWHERE_UNEXPECTED_ERROR, e.what());

View File

@ -145,6 +145,10 @@ GetVecIndexFactory(const IndexType& type, const Config& cfg) {
index = std::make_shared<knowhere::GPUIVFPQ>(gpu_device);
break;
}
case IndexType::FAISS_IVFPQ_MIX: {
index = std::make_shared<knowhere::GPUIVFPQ>(gpu_device);
return std::make_shared<IVFMixIndex>(index, IndexType::FAISS_IVFPQ_MIX);
}
case IndexType::FAISS_IVFSQ8_MIX: {
index = std::make_shared<knowhere::GPUIVFSQ>(gpu_device);
return std::make_shared<IVFMixIndex>(index, IndexType::FAISS_IVFSQ8_MIX);
@ -276,6 +280,10 @@ ConvertToCpuIndexType(const IndexType& type) {
case IndexType::FAISS_IVFSQ8_MIX: {
return IndexType::FAISS_IVFSQ8_CPU;
}
case IndexType::FAISS_IVFPQ_GPU:
case IndexType::FAISS_IVFPQ_MIX: {
return IndexType::FAISS_IVFPQ_CPU;
}
default: { return type; }
}
}
@ -291,9 +299,12 @@ ConvertToGpuIndexType(const IndexType& type) {
case IndexType::FAISS_IVFSQ8_CPU: {
return IndexType::FAISS_IVFSQ8_GPU;
}
case IndexType::FAISS_IVFPQ_MIX:
case IndexType::FAISS_IVFPQ_CPU: {
return IndexType::FAISS_IVFPQ_GPU;
}
default: { return type; }
}
}
} // namespace engine
} // namespace milvus

View File

@ -33,6 +33,7 @@ namespace engine {
using Config = knowhere::Config;
// TODO(linxj): replace with string, Do refactor serialization
enum class IndexType {
INVALID = 0,
FAISS_IDMAP = 1,
@ -47,6 +48,7 @@ enum class IndexType {
FAISS_IVFSQ8_GPU,
FAISS_IVFSQ8_HYBRID, // only support build on gpu.
NSG_MIX,
FAISS_IVFPQ_MIX,
};
class VecIndex;

View File

@ -110,12 +110,18 @@ set(unittest_libs
pthread
metrics
gfortran
prometheus-cpp-pull
prometheus-cpp-push
prometheus-cpp-core
dl
z
)
if (MILVUS_WITH_PROMETHEUS)
set(unittest_libs ${unittest_libs}
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
)
endif ()
set(unittest_libs ${unittest_libs}
dl
z
)
if (MILVUS_GPU_VERSION)
include_directories("${CUDA_INCLUDE_DIRS}")
@ -135,4 +141,4 @@ add_subdirectory(db)
add_subdirectory(wrapper)
add_subdirectory(metrics)
add_subdirectory(scheduler)
add_subdirectory(server)
add_subdirectory(server)

View File

@ -18,10 +18,15 @@
#-------------------------------------------------------------------------------
set(test_files
${CMAKE_CURRENT_SOURCE_DIR}/test_metricbase.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_metrics.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_prometheus.cpp
${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp)
test_metricbase.cpp
test_metrics.cpp
utils.cpp
)
if (MILVUS_WITH_PROMETHEUS)
set(test_files ${test_files}
test_prometheus.cpp)
endif ()
add_executable(test_metrics
${common_files}

View File

@ -15,8 +15,8 @@
// specific language governing permissions and limitations
// under the License.
#include "metrics/PrometheusMetrics.h"
#include "server/Config.h"
#include "metrics/prometheus/PrometheusMetrics.h"
#include <gtest/gtest.h>
#include <iostream>

View File

@ -29,33 +29,40 @@
INITIALIZE_EASYLOGGINGPP
using ::testing::Combine;
using ::testing::TestWithParam;
using ::testing::Values;
using ::testing::Combine;
class KnowhereWrapperTest
: public DataGenBase,
public TestWithParam<::std::tuple<milvus::engine::IndexType, std::string, int, int, int, int>> {
: public DataGenBase,
public TestWithParam<::std::tuple<milvus::engine::IndexType, std::string, int, int, int, int>> {
protected:
void
SetUp() override {
void SetUp() override {
#ifdef MILVUS_GPU_VERSION
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
#endif
std::string generator_type;
std::tie(index_type, generator_type, dim, nb, nq, k) = GetParam();
GenData(dim, nb, nq, xb, xq, ids, k, gt_ids, gt_dis);
milvus::engine::TempMetaConf tempconf;
tempconf.metric_type = knowhere::METRICTYPE::L2;
tempconf.gpu_id = DEVICEID;
tempconf.size = nb;
tempconf.dim = dim;
tempconf.k = k;
tempconf.nprobe = 16;
index_ = GetVecIndexFactory(index_type);
conf = ParamGenerator::GetInstance().Gen(index_type);
conf->k = k;
conf->d = dim;
conf->gpu_id = DEVICEID;
conf = ParamGenerator::GetInstance().GenBuild(index_type, tempconf);
searchconf = ParamGenerator::GetInstance().GenSearchConf(index_type, tempconf);
// conf->k = k;
// conf->d = dim;
// conf->gpu_id = DEVICEID;
}
void
TearDown() override {
void TearDown() override {
#ifdef MILVUS_GPU_VERSION
knowhere::FaissGpuResourceMgr::GetInstance().Free();
#endif
@ -65,24 +72,27 @@ class KnowhereWrapperTest
milvus::engine::IndexType index_type;
milvus::engine::VecIndexPtr index_ = nullptr;
knowhere::Config conf;
knowhere::Config searchconf;
};
INSTANTIATE_TEST_CASE_P(
WrapperParam, KnowhereWrapperTest,
Values(
//["Index type", "Generator type", "dim", "nb", "nq", "k", "build config", "search config"]
INSTANTIATE_TEST_CASE_P(WrapperParam, KnowhereWrapperTest,
Values(
//["Index type", "Generator type", "dim", "nb", "nq", "k", "build config", "search config"]
#ifdef MILVUS_GPU_VERSION
std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_GPU, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_MIX, "Default", 64, 100000, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_MIX, "Default", 64, 1000, 10, 10),
// std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB,
// 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_MIX, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFPQ_MIX, "Default", 64, 1000, 10, 10),
// std::make_tuple(IndexType::NSG_MIX, "Default", 128, 250000, 10, 10),
#endif
// std::make_tuple(IndexType::SPTAG_KDT_RNT_CPU, "Default", 128, 250000, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IDMAP, "Default", 64, 100000, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_CPU, "Default", 64, 100000, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IDMAP, "Default", 64, 1000, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFFLAT_CPU, "Default", 64, 1000, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_CPU, "Default", DIM, NB, 10, 10)));
TEST_P(KnowhereWrapperTest, BASE_TEST) {
@ -93,12 +103,11 @@ TEST_P(KnowhereWrapperTest, BASE_TEST) {
std::vector<float> res_dis(elems);
index_->BuildAll(nb, xb.data(), ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
AssertResult(res_ids, res_dis);
}
#ifdef MILVUS_GPU_VERSION
TEST_P(KnowhereWrapperTest, TO_GPU_TEST) {
EXPECT_EQ(index_->GetType(), index_type);
@ -107,13 +116,13 @@ TEST_P(KnowhereWrapperTest, TO_GPU_TEST) {
std::vector<float> res_dis(elems);
index_->BuildAll(nb, xb.data(), ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
AssertResult(res_ids, res_dis);
{
auto dev_idx = index_->CopyToGpu(DEVICEID);
for (int i = 0; i < 10; ++i) {
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
}
AssertResult(res_ids, res_dis);
}
@ -125,7 +134,7 @@ TEST_P(KnowhereWrapperTest, TO_GPU_TEST) {
auto dev_idx = new_index->CopyToGpu(DEVICEID);
for (int i = 0; i < 10; ++i) {
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
}
AssertResult(res_ids, res_dis);
}
@ -139,7 +148,7 @@ TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) {
std::vector<int64_t> res_ids(elems);
std::vector<float> res_dis(elems);
index_->BuildAll(nb, xb.data(), ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
AssertResult(res_ids, res_dis);
{
@ -152,7 +161,7 @@ TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) {
std::vector<int64_t> res_ids(elems);
std::vector<float> res_dis(elems);
new_index->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
new_index->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
AssertResult(res_ids, res_dis);
}
@ -166,7 +175,7 @@ TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) {
std::vector<int64_t> res_ids(elems);
std::vector<float> res_dis(elems);
new_index->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
new_index->Search(nq, xq.data(), res_dis.data(), res_ids.data(), searchconf);
AssertResult(res_ids, res_dis);
}
}

View File

@ -15,18 +15,22 @@
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <gtest/gtest.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <memory>
#include <vector>
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include "knowhere/index/vector_index/helpers/IndexParameter.h"
#include "wrapper/VecIndex.h"
#include "wrapper/utils.h"
#include "knowhere/index/vector_index/helpers/IndexParameter.h"
#include "wrapper/ConfAdapterMgr.h"
#include "wrapper/ConfAdapter.h"
class DataGenBase;
@ -40,29 +44,31 @@ constexpr int64_t PINMEM = 1024 * 1024 * 200;
constexpr int64_t TEMPMEM = 1024 * 1024 * 300;
constexpr int64_t RESNUM = 2;
static const char* CONFIG_PATH = "/tmp/milvus_test";
static const char* CONFIG_FILE = "/server_config.yaml";
static const char *CONFIG_PATH = "/tmp/milvus_test";
static const char *CONFIG_FILE = "/server_config.yaml";
class KnowhereTest : public ::testing::Test {
protected:
void
SetUp() override;
void
TearDown() override;
void SetUp() override;
void TearDown() override;
};
class DataGenBase {
public:
virtual void
GenData(const int& dim, const int& nb, const int& nq, float* xb, float* xq, int64_t* ids, const int& k,
int64_t* gt_ids, float* gt_dis);
virtual void GenData(const int& dim, const int& nb, const int& nq, float* xb, float* xq, int64_t* ids,
const int& k, int64_t* gt_ids, float* gt_dis);
virtual void
GenData(const int& dim, const int& nb, const int& nq, std::vector<float>& xb, std::vector<float>& xq,
std::vector<int64_t>& ids, const int& k, std::vector<int64_t>& gt_ids, std::vector<float>& gt_dis);
virtual void GenData(const int& dim,
const int& nb,
const int& nq,
std::vector<float>& xb,
std::vector<float>& xq,
std::vector<int64_t>& ids,
const int& k,
std::vector<int64_t>& gt_ids,
std::vector<float>& gt_dis);
void
AssertResult(const std::vector<int64_t>& ids, const std::vector<float>& dis);
void AssertResult(const std::vector<int64_t>& ids, const std::vector<float>& dis);
int dim = DIM;
int nb = NB;
@ -79,14 +85,22 @@ class DataGenBase {
class ParamGenerator {
public:
static ParamGenerator&
GetInstance() {
static ParamGenerator& GetInstance() {
static ParamGenerator instance;
return instance;
}
knowhere::Config
Gen(const milvus::engine::IndexType& type) {
knowhere::Config GenSearchConf(const milvus::engine::IndexType& type, const milvus::engine::TempMetaConf& conf) {
auto adapter = milvus::engine::AdapterMgr::GetInstance().GetAdapter(type);
return adapter->MatchSearch(conf, type);
}
knowhere::Config GenBuild(const milvus::engine::IndexType& type, const milvus::engine::TempMetaConf& conf) {
auto adapter = milvus::engine::AdapterMgr::GetInstance().GetAdapter(type);
return adapter->Match(conf);
}
knowhere::Config Gen(const milvus::engine::IndexType& type) {
switch (type) {
case milvus::engine::IndexType::FAISS_IDMAP: {
auto tempconf = std::make_shared<knowhere::Cfg>();
@ -113,34 +127,37 @@ class ParamGenerator {
tempconf->metric_type = knowhere::METRICTYPE::L2;
return tempconf;
}
// case milvus::engine::IndexType::FAISS_IVFPQ_CPU:
// case milvus::engine::IndexType::FAISS_IVFPQ_GPU: {
// auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
// tempconf->nlist = 100;
// tempconf->nprobe = 16;
// tempconf->nbits = 8;
// tempconf->m = 8;
// tempconf->metric_type = knowhere::METRICTYPE::L2;
// return tempconf;
// }
// case milvus::engine::IndexType::NSG_MIX: {
// auto tempconf = std::make_shared<knowhere::NSGCfg>();
// tempconf->nlist = 100;
// tempconf->nprobe = 16;
// tempconf->search_length = 8;
// tempconf->knng = 200;
// tempconf->search_length = 40; // TODO(linxj): be 20 when search
// tempconf->out_degree = 60;
// tempconf->candidate_pool_size = 200;
// tempconf->metric_type = knowhere::METRICTYPE::L2;
// return tempconf;
// }
case milvus::engine::IndexType::FAISS_IVFPQ_CPU:
case milvus::engine::IndexType::FAISS_IVFPQ_GPU:
case milvus::engine::IndexType::FAISS_IVFPQ_MIX: {
auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
tempconf->nlist = 100;
tempconf->nprobe = 16;
tempconf->nbits = 8;
tempconf->m = 8;
tempconf->metric_type = knowhere::METRICTYPE::L2;
return tempconf;
}
case milvus::engine::IndexType::NSG_MIX: {
auto tempconf = std::make_shared<knowhere::NSGCfg>();
tempconf->nlist = 100;
tempconf->nprobe = 16;
tempconf->search_length = 8;
tempconf->knng = 200;
tempconf->search_length = 40; // TODO(linxj): be 20 when search
tempconf->out_degree = 60;
tempconf->candidate_pool_size = 200;
tempconf->metric_type = knowhere::METRICTYPE::L2;
return tempconf;
}
}
}
};
// class SanityCheck : public DataGenBase {
//class SanityCheck : public DataGenBase {
// public:
// void GenData(const int &dim, const int &nb, const int &nq, float *xb, float *xq, long *ids,
// const int &k, long *gt_ids, float *gt_dis) override;
//};

View File

@ -0,0 +1,29 @@
FROM ubuntu:16.04
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates gnupg2 apt-transport-https && \
wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list' && \
wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local && \
apt-get update && apt-get install -y --no-install-recommends \
g++ git gfortran lsb-core \
libboost-serialization-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev \
curl libtool automake libssl-dev pkg-config libcurl4-openssl-dev python3-pip \
clang-format-6.0 clang-tidy-6.0 \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
/usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
WORKDIR /opt/milvus
ENTRYPOINT [ "/app/docker-entrypoint.sh" ]
CMD [ "start" ]

View File

@ -0,0 +1,29 @@
FROM ubuntu:18.04
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates gnupg2 && \
wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list' && \
wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local && \
apt-get update && apt-get install -y --no-install-recommends \
g++ git gfortran lsb-core \
libboost-serialization-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev \
curl libtool automake libssl-dev pkg-config libcurl4-openssl-dev python3-pip \
clang-format-6.0 clang-tidy-6.0 \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
/usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
WORKDIR /opt/milvus
ENTRYPOINT [ "/app/docker-entrypoint.sh" ]
CMD [ "start" ]

View File

@ -11,15 +11,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends wget && \
git flex bison gfortran lsb-core \
curl libtool automake libboost1.58-all-dev libssl-dev pkg-config libcurl4-openssl-dev python3-pip \
clang-format-6.0 clang-tidy-6.0 \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.4-243 intel-mkl-core-2019.4-243 && \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
WORKDIR /opt/milvus
ENTRYPOINT [ "/app/docker-entrypoint.sh" ]
CMD [ "start" ]

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -e
if [ "$1" = 'start' ]; then
tail -f /dev/null
fi
exec "$@"

View File

@ -11,15 +11,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends wget && \
git flex bison gfortran lsb-core \
curl libtool automake libboost-all-dev libssl-dev pkg-config libcurl4-openssl-dev python3-pip \
clang-format-6.0 clang-tidy-6.0 \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.4-243 intel-mkl-core-2019.4-243 && \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
WORKDIR /opt/milvus
ENTRYPOINT [ "/app/docker-entrypoint.sh" ]
CMD [ "start" ]

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -e
if [ "$1" = 'start' ]; then
tail -f /dev/null
fi
exec "$@"

View File

@ -0,0 +1,20 @@
FROM ubuntu:16.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gfortran libsqlite3-dev libmysqlclient-dev libcurl4-openssl-dev python3 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
COPY ./docker-entrypoint.sh /opt
COPY ./milvus /opt/milvus
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/milvus/lib"
WORKDIR /opt/milvus
ENTRYPOINT [ "/opt/docker-entrypoint.sh" ]
CMD [ "start" ]
EXPOSE 19530

View File

@ -7,4 +7,3 @@ if [ "$1" == 'start' ]; then
fi
exec "$@"

View File

@ -0,0 +1,21 @@
FROM ubuntu:18.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gfortran libsqlite3-dev libmysqlclient-dev libcurl4-openssl-dev python3 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
COPY ./docker-entrypoint.sh /opt
COPY ./milvus /opt/milvus
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/milvus/lib"
WORKDIR /opt/milvus
ENTRYPOINT [ "/opt/docker-entrypoint.sh" ]
CMD [ "start" ]
EXPOSE 19530

View File

@ -7,4 +7,3 @@ if [ "$1" == 'start' ]; then
fi
exec "$@"

View File

@ -15,6 +15,8 @@ COPY ./docker-entrypoint.sh /opt
COPY ./milvus /opt/milvus
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/milvus/lib"
WORKDIR /opt/milvus
ENTRYPOINT [ "/opt/docker-entrypoint.sh" ]
CMD [ "start" ]

View File

@ -0,0 +1,9 @@
#!/bin/bash
set -e
if [ "$1" == 'start' ]; then
cd /opt/milvus/scripts && ./start_server.sh
fi
exec "$@"

View File

@ -15,6 +15,8 @@ COPY ./docker-entrypoint.sh /opt
COPY ./milvus /opt/milvus
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/milvus/lib"
WORKDIR /opt/milvus
ENTRYPOINT [ "/opt/docker-entrypoint.sh" ]
CMD [ "start" ]

View File

@ -0,0 +1,9 @@
#!/bin/bash
set -e
if [ "$1" == 'start' ]; then
cd /opt/milvus/scripts && ./start_server.sh
fi
exec "$@"

View File

@ -29,10 +29,15 @@ $ ./build.sh -t Release
```
By default, it will build CPU version. To build GPU version, add `-g` option
```
```shell
$ ./build.sh -g
```
If you want to know the complete build options, run
```shell
$./build.sh -h
```
When the build is completed, all the stuff that you need in order to run Milvus will be installed under `[Milvus root path]/core/milvus`.
## Launch Milvus server
@ -43,13 +48,13 @@ $ cd [Milvus root path]/core/milvus
Add `lib/` directory to `LD_LIBRARY_PATH`
```
```shell
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:[Milvus root path]/core/milvus/lib
```
Then start Milvus server:
```
```shell
$ cd scripts
$ ./start_server.sh
```
@ -65,7 +70,7 @@ $ ./stop_server.sh
`protocol https not supported or disabled in libcurl`.
First, make sure you have `libcurl4-openssl-dev` installed in your system.
Then try reinstall CMake from source with `--system-curl` option:
```
```shell
$ ./bootstrap --system-curl
$ make
$ sudo make install

View File

@ -99,7 +99,7 @@
<dependency>
<groupId>io.milvus</groupId>
<artifactId>milvus-sdk-java</artifactId>
<version>0.2.0-SNAPSHOT</version>
<version>0.3.0</version>
</dependency>
<!-- <dependency>-->
@ -134,4 +134,4 @@
</dependencies>
</project>
</project>

View File

@ -1,7 +1,7 @@
import logging
import pytest
__version__ = '0.5.1'
__version__ = '0.6.0'
class TestPing: