mirror of https://gitee.com/milvus-io/milvus.git (synced 2025-12-29 15:05:31 +08:00)

commit ca0f5e465e
Merge remote-tracking branch 'main/0.5.1' into 0.5.1

Former-commit-id: d98c3a065f0a643ff3c9c3b34cdc9361b34bd1a1
CHANGELOG.md
@@ -5,9 +5,13 @@ Please mark all change in change log and use the ticket from JIRA.

# Milvus 0.5.1 (TODO)

## Bug
- \#104 - test_scheduler core dump

## Improvement
- \#64 - Improvement dump function in scheduler
- \#80 - Print version information into log during server start
- \#82 - Move easyloggingpp into "external" directory
- \#92 - Speed up CMake build process

## Feature

## Task
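For context on \#80: it amounts to a single log line added in Server::Start() (see the Server.cpp hunk near the end of this diff). A minimal, hedged sketch of that banner follows; the real BUILD_TYPE, MILVUS_VERSION, and BUILD_TIME macros come from the generated src/version.h, and the placeholder values here are assumptions:

#include <iostream>

// Placeholders standing in for the macros generated into src/version.h.
#define BUILD_TYPE "Release"
#define MILVUS_VERSION "0.5.1"
#define BUILD_TIME "2019-10-21 10:00"

int main() {
    // Mirrors the SERVER_LOG_INFO banner added for #80, using std::cout here.
    std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION
              << ", built at " << BUILD_TIME << std::endl;
    return 0;
}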
ci/jenkins/Jenkinsfile (vendored, 18 changed lines)
@@ -1,6 +1,11 @@
String cron_string = BRANCH_NAME == "master" ? "H 0 * * *" : ""
cron_string = BRANCH_NAME == "0.5.1" ? "H 1 * * *" : cron_string

pipeline {
    agent none

    triggers { cron(cron_string) }

    options {
        timestamps()
    }
@@ -119,7 +124,12 @@ pipeline {
            steps {
                container('milvus-test-env') {
                    script {
                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/singleDevTest.groovy"
                        boolean isNightlyTest = isTimeTriggeredBuild()
                        if (isNightlyTest) {
                            load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/singleDevNightlyTest.groovy"
                        } else {
                            load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/singleDevTest.groovy"
                        }
                    }
                }
            }
@@ -150,3 +160,9 @@ pipeline {
    }
}

boolean isTimeTriggeredBuild() {
    if (currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0) {
        return true
    }
    return false
}
@@ -1,5 +1,8 @@
try {
    sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
    def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
    if (!helmResult) {
        sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
    }
} catch (exc) {
    def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
    if (!helmResult) {
@@ -1,4 +1,4 @@
timeout(time: 60, unit: 'MINUTES') {
timeout(time: 30, unit: 'MINUTES') {
    dir ("ci/jenkins/scripts") {
        sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"
        // Set some env variables so codecov detection script works correctly
@@ -1,14 +1,9 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo update'
    dir ('milvus-helm') {
        checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/values.yaml --namespace milvus ."
        }
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo update'
    dir ('milvus-helm') {
        checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
        }
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
    throw exc
}
ci/jenkins/jenkinsfile/singleDevNightlyTest.groovy (new file, 22 lines)
@@ -0,0 +1,22 @@
timeout(time: 90, unit: 'MINUTES') {
    dir ("tests/milvus_python_test") {
        sh 'python3 -m pip install -r requirements.txt'
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
    }
    // mysql database backend test
    load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

    if (!fileExists('milvus-helm')) {
        dir ("milvus-helm") {
            checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
        }
    }
    dir ("milvus-helm") {
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
        }
    }
    dir ("tests/milvus_python_test") {
        sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
    }
}
@@ -1,4 +1,4 @@
timeout(time: 30, unit: 'MINUTES') {
timeout(time: 60, unit: 'MINUTES') {
    dir ("tests/milvus_python_test") {
        sh 'python3 -m pip install -r requirements.txt'
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
@@ -13,7 +13,7 @@ timeout(time: 30, unit: 'MINUTES') {
    }
    dir ("milvus-helm") {
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml --namespace milvus ."
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
        }
    }
    dir ("tests/milvus_python_test") {
@@ -109,6 +109,7 @@ for test in `ls ${DIR_UNITTEST}`; do
    if [ $? -ne 0 ]; then
        echo ${args}
        echo ${DIR_UNITTEST}/${test} "run failed"
        exit -1
    fi
done

@@ -134,5 +135,10 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
    "*/src/external/easyloggingpp/easylogging++.h" \
    "*/src/external/easyloggingpp/easylogging++.cc"

if [ $? -ne 0 ]; then
    echo "gen ${FILE_INFO_OUTPUT_NEW} failed"
    exit -2
fi

# gen html report
# ${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/
@@ -71,7 +71,7 @@ if(MILVUS_VERSION_MAJOR STREQUAL ""
endif()

message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/version.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h)

message(STATUS "Milvus version: "
        "${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} "
@@ -55,21 +55,10 @@ define_option_string(MILVUS_DEPENDENCY_SOURCE
define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
    "Show output from ExternalProjects rather than just logging to files" ON)

define_option(MILVUS_BOOST_VENDORED "Use vendored Boost instead of existing Boost. \
Note that this requires linking Boost statically" OFF)

define_option(MILVUS_BOOST_HEADER_ONLY "Use only BOOST headers" OFF)

define_option(MILVUS_WITH_BZ2 "Build with BZ2 compression" ON)

define_option(MILVUS_WITH_EASYLOGGINGPP "Build with Easylogging++ library" ON)

define_option(MILVUS_WITH_LZ4 "Build with lz4 compression" ON)

define_option(MILVUS_WITH_PROMETHEUS "Build with PROMETHEUS library" ON)

define_option(MILVUS_WITH_SNAPPY "Build with Snappy compression" ON)

define_option(MILVUS_WITH_SQLITE "Build with SQLite library" ON)

define_option(MILVUS_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)
@@ -78,16 +67,6 @@ define_option(MILVUS_WITH_MYSQLPP "Build with MySQL++" ON)

define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)

define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)

if(CMAKE_VERSION VERSION_LESS 3.7)
    set(MILVUS_WITH_ZSTD_DEFAULT OFF)
else()
    # ExternalProject_Add(SOURCE_SUBDIR) is available since CMake 3.7.
    set(MILVUS_WITH_ZSTD_DEFAULT ON)
endif()
define_option(MILVUS_WITH_ZSTD "Build with zstd compression" ${MILVUS_WITH_ZSTD_DEFAULT})

if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
    define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
    define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
@@ -95,6 +74,8 @@ endif()

define_option(MILVUS_WITH_GRPC "Build with GRPC" ON)

define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)

#----------------------------------------------------------------------
if(MSVC)
    set_option_category("MSVC")
@@ -16,21 +16,16 @@

set(MILVUS_THIRDPARTY_DEPENDENCIES

    BOOST
    BZip2
    GTest
    Lz4
    MySQLPP
    Prometheus
    Snappy
    SQLite
    SQLite_ORM
    yaml-cpp
    ZLIB
    ZSTD
    libunwind
    gperftools
    GRPC)
    GRPC
    ZLIB)

message(STATUS "Using ${MILVUS_DEPENDENCY_SOURCE} approach to find dependencies")
@@ -42,34 +37,26 @@ foreach(DEPENDENCY ${MILVUS_THIRDPARTY_DEPENDENCIES})
endforeach()

macro(build_dependency DEPENDENCY_NAME)
    if("${DEPENDENCY_NAME}" STREQUAL "BZip2")
        build_bzip2()
    elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
    if ("${DEPENDENCY_NAME}" STREQUAL "GTest")
        build_gtest()
    elseif("${DEPENDENCY_NAME}" STREQUAL "Lz4")
        build_lz4()
    elseif ("${DEPENDENCY_NAME}" STREQUAL "MySQLPP")
        build_mysqlpp()
    elseif ("${DEPENDENCY_NAME}" STREQUAL "Prometheus")
        build_prometheus()
    elseif ("${DEPENDENCY_NAME}" STREQUAL "Snappy")
        build_snappy()
    elseif ("${DEPENDENCY_NAME}" STREQUAL "SQLite")
        build_sqlite()
    elseif ("${DEPENDENCY_NAME}" STREQUAL "SQLite_ORM")
        build_sqlite_orm()
    elseif("${DEPENDENCY_NAME}" STREQUAL "yaml-cpp")
        build_yamlcpp()
    elseif("${DEPENDENCY_NAME}" STREQUAL "ZLIB")
        build_zlib()
    elseif("${DEPENDENCY_NAME}" STREQUAL "ZSTD")
        build_zstd()
    elseif("${DEPENDENCY_NAME}" STREQUAL "libunwind")
        build_libunwind()
    elseif("${DEPENDENCY_NAME}" STREQUAL "gperftools")
        build_gperftools()
    elseif("${DEPENDENCY_NAME}" STREQUAL "GRPC")
        build_grpc()
    elseif("${DEPENDENCY_NAME}" STREQUAL "ZLIB")
        build_zlib()
    else()
        message(FATAL_ERROR "Unknown thirdparty dependency to build: ${DEPENDENCY_NAME}")
    endif ()
@@ -263,23 +250,6 @@ foreach(_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT})
    set(${_LIB_NAME} "${_LIB_VERSION}")
endforeach()

if(DEFINED ENV{MILVUS_BOOST_URL})
    set(BOOST_SOURCE_URL "$ENV{MILVUS_BOOST_URL}")
else()
    string(REPLACE "." "_" BOOST_VERSION_UNDERSCORES ${BOOST_VERSION})
    set(BOOST_SOURCE_URL
        "https://nchc.dl.sourceforge.net/project/boost/boost/${BOOST_VERSION}/boost_${BOOST_VERSION_UNDERSCORES}.tar.gz")
    #"https://dl.bintray.com/boostorg/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_UNDERSCORES}.tar.gz")
endif()
set(BOOST_MD5 "fea771fe8176828fabf9c09242ee8c26")

if(DEFINED ENV{MILVUS_BZIP2_URL})
    set(BZIP2_SOURCE_URL "$ENV{MILVUS_BZIP2_URL}")
else()
    set(BZIP2_SOURCE_URL "https://sourceware.org/pub/bzip2/bzip2-${BZIP2_VERSION}.tar.gz")
endif()
set(BZIP2_MD5 "00b516f4704d4a7cb50a1d97e6e8e15b")

if (DEFINED ENV{MILVUS_GTEST_URL})
    set(GTEST_SOURCE_URL "$ENV{MILVUS_GTEST_URL}")
else ()
@@ -288,13 +258,6 @@ else ()
endif()
set(GTEST_MD5 "2e6fbeb6a91310a16efe181886c59596")

if(DEFINED ENV{MILVUS_LZ4_URL})
    set(LZ4_SOURCE_URL "$ENV{MILVUS_LZ4_URL}")
else()
    set(LZ4_SOURCE_URL "https://github.com/lz4/lz4/archive/${LZ4_VERSION}.tar.gz")
endif()
set(LZ4_MD5 "a80f28f2a2e5fe59ebfe8407f793da22")

if(DEFINED ENV{MILVUS_MYSQLPP_URL})
    set(MYSQLPP_SOURCE_URL "$ENV{MILVUS_MYSQLPP_URL}")
else()
@@ -309,14 +272,6 @@ else ()
    https://github.com/jupp0r/prometheus-cpp.git)
endif()

if(DEFINED ENV{MILVUS_SNAPPY_URL})
    set(SNAPPY_SOURCE_URL "$ENV{MILVUS_SNAPPY_URL}")
else()
    set(SNAPPY_SOURCE_URL
        "https://github.com/google/snappy/archive/${SNAPPY_VERSION}.tar.gz")
endif()
set(SNAPPY_MD5 "ee9086291c9ae8deb4dac5e0b85bf54a")

if(DEFINED ENV{MILVUS_SQLITE_URL})
    set(SQLITE_SOURCE_URL "$ENV{MILVUS_SQLITE_URL}")
else()
@@ -329,7 +284,6 @@ if(DEFINED ENV{MILVUS_SQLITE_ORM_URL})
    set(SQLITE_ORM_SOURCE_URL "$ENV{MILVUS_SQLITE_ORM_URL}")
else()
    set(SQLITE_ORM_SOURCE_URL
        # "http://192.168.1.105:6060/Test/sqlite_orm/-/archive/master/sqlite_orm-master.zip")
        "https://github.com/fnc12/sqlite_orm/archive/${SQLITE_ORM_VERSION}.zip")
endif()
set(SQLITE_ORM_MD5 "ba9a405a8a1421c093aa8ce988ff8598")
@@ -341,20 +295,6 @@ else()
endif()
set(YAMLCPP_MD5 "5b943e9af0060d0811148b037449ef82")

if(DEFINED ENV{MILVUS_ZLIB_URL})
    set(ZLIB_SOURCE_URL "$ENV{MILVUS_ZLIB_URL}")
else()
    set(ZLIB_SOURCE_URL "https://github.com/madler/zlib/archive/${ZLIB_VERSION}.tar.gz")
endif()
set(ZLIB_MD5 "0095d2d2d1f3442ce1318336637b695f")

if(DEFINED ENV{MILVUS_ZSTD_URL})
    set(ZSTD_SOURCE_URL "$ENV{MILVUS_ZSTD_URL}")
else()
    set(ZSTD_SOURCE_URL "https://github.com/facebook/zstd/archive/${ZSTD_VERSION}.tar.gz")
endif()
set(ZSTD_MD5 "340c837db48354f8d5eafe74c6077120")

if(DEFINED ENV{MILVUS_LIBUNWIND_URL})
    set(LIBUNWIND_SOURCE_URL "$ENV{MILVUS_LIBUNWIND_URL}")
else()
@@ -379,202 +319,12 @@ else()
endif()
set(GRPC_MD5 "0362ba219f59432c530070b5f5c3df73")


# ----------------------------------------------------------------------
# Add Boost dependencies (code adapted from Apache Kudu (incubating))

set(Boost_USE_MULTITHREADED ON)
set(Boost_ADDITIONAL_VERSIONS
    "1.70.0"
    "1.70"
    "1.69.0"
    "1.69"
    "1.68.0"
    "1.68"
    "1.67.0"
    "1.67"
    "1.66.0"
    "1.66"
    "1.65.0"
    "1.65"
    "1.64.0"
    "1.64"
    "1.63.0"
    "1.63"
    "1.62.0"
    "1.61"
    "1.61.0"
    "1.62"
    "1.60.0"
    "1.60")

if(MILVUS_BOOST_VENDORED)
    set(BOOST_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/boost_ep-prefix/src/boost_ep")
    set(BOOST_LIB_DIR "${BOOST_PREFIX}/stage/lib")
    set(BOOST_BUILD_LINK "static")
    set(BOOST_STATIC_SYSTEM_LIBRARY
        "${BOOST_LIB_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}boost_system${CMAKE_STATIC_LIBRARY_SUFFIX}"
    )
    set(BOOST_STATIC_FILESYSTEM_LIBRARY
        "${BOOST_LIB_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}boost_filesystem${CMAKE_STATIC_LIBRARY_SUFFIX}"
    )
    set(BOOST_STATIC_SERIALIZATION_LIBRARY
        "${BOOST_LIB_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}boost_serialization${CMAKE_STATIC_LIBRARY_SUFFIX}"
    )
    set(BOOST_SYSTEM_LIBRARY boost_system_static)
    set(BOOST_FILESYSTEM_LIBRARY boost_filesystem_static)
    set(BOOST_SERIALIZATION_LIBRARY boost_serialization_static)

    if(MILVUS_BOOST_HEADER_ONLY)
        set(BOOST_BUILD_PRODUCTS)
        set(BOOST_CONFIGURE_COMMAND "")
        set(BOOST_BUILD_COMMAND "")
    else()
        set(BOOST_BUILD_PRODUCTS ${BOOST_STATIC_SYSTEM_LIBRARY}
            ${BOOST_STATIC_FILESYSTEM_LIBRARY} ${BOOST_STATIC_SERIALIZATION_LIBRARY})
        set(BOOST_CONFIGURE_COMMAND "./bootstrap.sh" "--prefix=${BOOST_PREFIX}"
            "--with-libraries=filesystem,serialization,system")
        if("${CMAKE_BUILD_TYPE}" STREQUAL "DEBUG")
            set(BOOST_BUILD_VARIANT "debug")
        else()
            set(BOOST_BUILD_VARIANT "release")
        endif()
        set(BOOST_BUILD_COMMAND
            "./b2"
            "link=${BOOST_BUILD_LINK}"
            "variant=${BOOST_BUILD_VARIANT}"
            "cxxflags=-fPIC")

        add_thirdparty_lib(boost_system STATIC_LIB "${BOOST_STATIC_SYSTEM_LIBRARY}")

        add_thirdparty_lib(boost_filesystem STATIC_LIB "${BOOST_STATIC_FILESYSTEM_LIBRARY}")

        add_thirdparty_lib(boost_serialization STATIC_LIB "${BOOST_STATIC_SERIALIZATION_LIBRARY}")

        set(MILVUS_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY} ${BOOST_STATIC_SERIALIZATION_LIBRARY})
    endif()
    externalproject_add(boost_ep
        URL
        ${BOOST_SOURCE_URL}
        BUILD_BYPRODUCTS
        ${BOOST_BUILD_PRODUCTS}
        BUILD_IN_SOURCE
        1
        CONFIGURE_COMMAND
        ${BOOST_CONFIGURE_COMMAND}
        BUILD_COMMAND
        ${BOOST_BUILD_COMMAND}
        INSTALL_COMMAND
        ""
        ${EP_LOG_OPTIONS})


    set(Boost_INCLUDE_DIR "${BOOST_PREFIX}")
    set(Boost_INCLUDE_DIRS "${Boost_INCLUDE_DIR}")
    add_dependencies(boost_system_static boost_ep)
    add_dependencies(boost_filesystem_static boost_ep)
    add_dependencies(boost_serialization_static boost_ep)

endif()

include_directories(SYSTEM ${Boost_INCLUDE_DIR})
link_directories(SYSTEM ${BOOST_LIB_DIR})
# ----------------------------------------------------------------------
# bzip2

macro(build_bzip2)
    message(STATUS "Building BZip2-${BZIP2_VERSION} from source")
    set(BZIP2_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/bzip2_ep-prefix/src/bzip2_ep")
    set(BZIP2_INCLUDE_DIR "${BZIP2_PREFIX}/include")
    set(BZIP2_STATIC_LIB
        "${BZIP2_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}bz2${CMAKE_STATIC_LIBRARY_SUFFIX}")

    if(USE_JFROG_CACHE STREQUAL "ON")
        set(BZIP2_CACHE_PACKAGE_NAME "bzip2_${BZIP2_MD5}.tar.gz")
        set(BZIP2_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${BZIP2_CACHE_PACKAGE_NAME}")
        set(BZIP2_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${BZIP2_CACHE_PACKAGE_NAME}")

        execute_process(COMMAND wget -q --method HEAD ${BZIP2_CACHE_URL} RESULT_VARIABLE return_code)
        message(STATUS "Check the remote cache file ${BZIP2_CACHE_URL}. return code = ${return_code}")
        if (NOT return_code EQUAL 0)
            externalproject_add(bzip2_ep
                ${EP_LOG_OPTIONS}
                CONFIGURE_COMMAND
                ""
                BUILD_IN_SOURCE
                1
                BUILD_COMMAND
                ${MAKE}
                ${MAKE_BUILD_ARGS}
                CFLAGS=${EP_C_FLAGS}
                INSTALL_COMMAND
                ${MAKE}
                install
                PREFIX=${BZIP2_PREFIX}
                CFLAGS=${EP_C_FLAGS}
                INSTALL_DIR
                ${BZIP2_PREFIX}
                URL
                ${BZIP2_SOURCE_URL}
                BUILD_BYPRODUCTS
                "${BZIP2_STATIC_LIB}")

            ExternalProject_Create_Cache(bzip2_ep ${BZIP2_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/bzip2_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${BZIP2_CACHE_URL})
        else()
            file(DOWNLOAD ${BZIP2_CACHE_URL} ${BZIP2_CACHE_PACKAGE_PATH} STATUS status)
            list(GET status 0 status_code)
            message(STATUS "DOWNLOADING FROM ${BZIP2_CACHE_URL} TO ${BZIP2_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
            if (status_code EQUAL 0)
                ExternalProject_Use_Cache(bzip2_ep ${BZIP2_CACHE_PACKAGE_PATH} ${CMAKE_CURRENT_BINARY_DIR})
            endif()
        endif()
    else()
        externalproject_add(bzip2_ep
            ${EP_LOG_OPTIONS}
            CONFIGURE_COMMAND
            ""
            BUILD_IN_SOURCE
            1
            BUILD_COMMAND
            ${MAKE}
            ${MAKE_BUILD_ARGS}
            CFLAGS=${EP_C_FLAGS}
            INSTALL_COMMAND
            ${MAKE}
            install
            PREFIX=${BZIP2_PREFIX}
            CFLAGS=${EP_C_FLAGS}
            INSTALL_DIR
            ${BZIP2_PREFIX}
            URL
            ${BZIP2_SOURCE_URL}
            BUILD_BYPRODUCTS
            "${BZIP2_STATIC_LIB}")
    endif()

    file(MAKE_DIRECTORY "${BZIP2_INCLUDE_DIR}")
    add_library(bzip2 STATIC IMPORTED)
    set_target_properties(
        bzip2
        PROPERTIES IMPORTED_LOCATION "${BZIP2_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${BZIP2_INCLUDE_DIR}")

    add_dependencies(bzip2 bzip2_ep)
endmacro()

if(MILVUS_WITH_BZ2)
    resolve_dependency(BZip2)

    if(NOT TARGET bzip2)
        add_library(bzip2 UNKNOWN IMPORTED)
        set_target_properties(bzip2
            PROPERTIES IMPORTED_LOCATION "${BZIP2_LIBRARIES}"
            INTERFACE_INCLUDE_DIRECTORIES "${BZIP2_INCLUDE_DIR}")
    endif()
    link_directories(SYSTEM ${BZIP2_PREFIX}/lib/)
    include_directories(SYSTEM "${BZIP2_INCLUDE_DIR}")
if(DEFINED ENV{MILVUS_ZLIB_URL})
    set(ZLIB_SOURCE_URL "$ENV{MILVUS_ZLIB_URL}")
else()
    set(ZLIB_SOURCE_URL "https://github.com/madler/zlib/archive/${ZLIB_VERSION}.tar.gz")
endif()
set(ZLIB_MD5 "0095d2d2d1f3442ce1318336637b695f")

# ----------------------------------------------------------------------
# Google gtest
@@ -689,95 +439,6 @@ if (MILVUS_BUILD_TESTS)
    include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
endif()

# ----------------------------------------------------------------------
# lz4

macro(build_lz4)
    message(STATUS "Building lz4-${LZ4_VERSION} from source")
    set(LZ4_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/lz4_ep-prefix/src/lz4_ep")
    set(LZ4_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/lz4_ep-prefix/")

    set(LZ4_STATIC_LIB "${LZ4_BUILD_DIR}/lib/liblz4.a")
    set(LZ4_BUILD_COMMAND BUILD_COMMAND ${MAKE} ${MAKE_BUILD_ARGS} CFLAGS=${EP_C_FLAGS})

    # We need to copy the header in lib to directory outside of the build
    if(USE_JFROG_CACHE STREQUAL "ON")
        set(LZ4_CACHE_PACKAGE_NAME "lz4_${LZ4_MD5}.tar.gz")
        set(LZ4_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${LZ4_CACHE_PACKAGE_NAME}")
        set(LZ4_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${LZ4_CACHE_PACKAGE_NAME}")

        execute_process(COMMAND wget -q --method HEAD ${LZ4_CACHE_URL} RESULT_VARIABLE return_code)
        message(STATUS "Check the remote file ${LZ4_CACHE_URL}. return code = ${return_code}")
        if (NOT return_code EQUAL 0)
            externalproject_add(lz4_ep
                URL
                ${LZ4_SOURCE_URL}
                ${EP_LOG_OPTIONS}
                UPDATE_COMMAND
                ${CMAKE_COMMAND}
                -E
                copy_directory
                "${LZ4_BUILD_DIR}/lib"
                "${LZ4_PREFIX}/include"
                ${LZ4_PATCH_COMMAND}
                CONFIGURE_COMMAND
                ""
                INSTALL_COMMAND
                ""
                BINARY_DIR
                ${LZ4_BUILD_DIR}
                BUILD_BYPRODUCTS
                ${LZ4_STATIC_LIB}
                ${LZ4_BUILD_COMMAND})

            ExternalProject_Create_Cache(lz4_ep ${LZ4_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/lz4_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${LZ4_CACHE_URL})
        else()
            file(DOWNLOAD ${LZ4_CACHE_URL} ${LZ4_CACHE_PACKAGE_PATH} STATUS status)
            list(GET status 0 status_code)
            message(STATUS "DOWNLOADING FROM ${LZ4_CACHE_URL} TO ${LZ4_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
            if (status_code EQUAL 0)
                ExternalProject_Use_Cache(lz4_ep ${LZ4_CACHE_PACKAGE_PATH} ${CMAKE_CURRENT_BINARY_DIR})
            endif()
        endif()
    else()
        externalproject_add(lz4_ep
            URL
            ${LZ4_SOURCE_URL}
            ${EP_LOG_OPTIONS}
            UPDATE_COMMAND
            ${CMAKE_COMMAND}
            -E
            copy_directory
            "${LZ4_BUILD_DIR}/lib"
            "${LZ4_PREFIX}/include"
            ${LZ4_PATCH_COMMAND}
            CONFIGURE_COMMAND
            ""
            INSTALL_COMMAND
            ""
            BINARY_DIR
            ${LZ4_BUILD_DIR}
            BUILD_BYPRODUCTS
            ${LZ4_STATIC_LIB}
            ${LZ4_BUILD_COMMAND})
    endif()

    file(MAKE_DIRECTORY "${LZ4_PREFIX}/include")
    add_library(lz4 STATIC IMPORTED)
    set_target_properties(lz4
        PROPERTIES IMPORTED_LOCATION "${LZ4_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${LZ4_PREFIX}/include")
    add_dependencies(lz4 lz4_ep)
endmacro()

if(MILVUS_WITH_LZ4)
    resolve_dependency(Lz4)

    get_target_property(LZ4_INCLUDE_DIR lz4 INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${LZ4_BUILD_DIR}/lib/)
    include_directories(SYSTEM ${LZ4_INCLUDE_DIR})
endif()
# ----------------------------------------------------------------------
# MySQL++

@@ -996,93 +657,6 @@ if(MILVUS_WITH_PROMETHEUS)

endif()

# ----------------------------------------------------------------------
# Snappy

macro(build_snappy)
    message(STATUS "Building snappy-${SNAPPY_VERSION} from source")
    set(SNAPPY_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/snappy_ep-prefix/src/snappy_ep")
    set(SNAPPY_INCLUDE_DIRS "${SNAPPY_PREFIX}/include")
    set(SNAPPY_STATIC_LIB_NAME snappy)
    set(SNAPPY_STATIC_LIB
        "${SNAPPY_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${SNAPPY_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
    )

    set(SNAPPY_CMAKE_ARGS
        ${EP_COMMON_CMAKE_ARGS}
        -DCMAKE_INSTALL_LIBDIR=lib
        -DSNAPPY_BUILD_TESTS=OFF
        "-DCMAKE_INSTALL_PREFIX=${SNAPPY_PREFIX}")

    if(USE_JFROG_CACHE STREQUAL "ON")
        set(SNAPPY_CACHE_PACKAGE_NAME "snappy_${SNAPPY_MD5}.tar.gz")
        set(SNAPPY_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${SNAPPY_CACHE_PACKAGE_NAME}")
        set(SNAPPY_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${SNAPPY_CACHE_PACKAGE_NAME}")

        execute_process(COMMAND wget -q --method HEAD ${SNAPPY_CACHE_URL} RESULT_VARIABLE return_code)
        message(STATUS "Check the remote file ${SNAPPY_CACHE_URL}. return code = ${return_code}")
        if (NOT return_code EQUAL 0)
            externalproject_add(snappy_ep
                ${EP_LOG_OPTIONS}
                BUILD_COMMAND
                ${MAKE}
                ${MAKE_BUILD_ARGS}
                BUILD_IN_SOURCE
                1
                INSTALL_DIR
                ${SNAPPY_PREFIX}
                URL
                ${SNAPPY_SOURCE_URL}
                CMAKE_ARGS
                ${SNAPPY_CMAKE_ARGS}
                BUILD_BYPRODUCTS
                "${SNAPPY_STATIC_LIB}")

            ExternalProject_Create_Cache(snappy_ep ${SNAPPY_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/snappy_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${SNAPPY_CACHE_URL})
        else()
            file(DOWNLOAD ${SNAPPY_CACHE_URL} ${SNAPPY_CACHE_PACKAGE_PATH} STATUS status)
            list(GET status 0 status_code)
            message(STATUS "DOWNLOADING FROM ${SNAPPY_CACHE_URL} TO ${SNAPPY_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
            if (status_code EQUAL 0)
                ExternalProject_Use_Cache(snappy_ep ${SNAPPY_CACHE_PACKAGE_PATH} ${CMAKE_CURRENT_BINARY_DIR})
            endif()
        endif()
    else()
        externalproject_add(snappy_ep
            ${EP_LOG_OPTIONS}
            BUILD_COMMAND
            ${MAKE}
            ${MAKE_BUILD_ARGS}
            BUILD_IN_SOURCE
            1
            INSTALL_DIR
            ${SNAPPY_PREFIX}
            URL
            ${SNAPPY_SOURCE_URL}
            CMAKE_ARGS
            ${SNAPPY_CMAKE_ARGS}
            BUILD_BYPRODUCTS
            "${SNAPPY_STATIC_LIB}")
    endif()

    file(MAKE_DIRECTORY "${SNAPPY_INCLUDE_DIR}")
    add_library(snappy STATIC IMPORTED)
    set_target_properties(snappy
        PROPERTIES IMPORTED_LOCATION "${SNAPPY_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES
        "${SNAPPY_INCLUDE_DIR}")
    add_dependencies(snappy snappy_ep)
endmacro()

if(MILVUS_WITH_SNAPPY)

    resolve_dependency(Snappy)

    get_target_property(SNAPPY_INCLUDE_DIRS snappy INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${SNAPPY_PREFIX}/lib/)
    include_directories(SYSTEM ${SNAPPY_INCLUDE_DIRS})
endif()

# ----------------------------------------------------------------------
# SQLite
@@ -1265,176 +839,6 @@ if(MILVUS_WITH_YAMLCPP)
    include_directories(SYSTEM ${YAMLCPP_INCLUDE_DIR})
endif()

# ----------------------------------------------------------------------
# zlib

macro(build_zlib)
    message(STATUS "Building ZLIB-${ZLIB_VERSION} from source")
    set(ZLIB_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/zlib_ep-prefix/src/zlib_ep")
    set(ZLIB_STATIC_LIB_NAME libz.a)
    set(ZLIB_STATIC_LIB "${ZLIB_PREFIX}/lib/${ZLIB_STATIC_LIB_NAME}")
    set(ZLIB_INCLUDE_DIR "${ZLIB_PREFIX}/include")
    set(ZLIB_CMAKE_ARGS ${EP_COMMON_CMAKE_ARGS} "-DCMAKE_INSTALL_PREFIX=${ZLIB_PREFIX}"
        -DBUILD_SHARED_LIBS=OFF)

    if(USE_JFROG_CACHE STREQUAL "ON")
        set(ZLIB_CACHE_PACKAGE_NAME "zlib_${ZLIB_MD5}.tar.gz")
        set(ZLIB_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${ZLIB_CACHE_PACKAGE_NAME}")
        set(ZLIB_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${ZLIB_CACHE_PACKAGE_NAME}")

        execute_process(COMMAND wget -q --method HEAD ${ZLIB_CACHE_URL} RESULT_VARIABLE return_code)
        message(STATUS "Check the remote file ${ZLIB_CACHE_URL}. return code = ${return_code}")
        if (NOT return_code EQUAL 0)
            externalproject_add(zlib_ep
                URL
                ${ZLIB_SOURCE_URL}
                ${EP_LOG_OPTIONS}
                BUILD_COMMAND
                ${MAKE}
                ${MAKE_BUILD_ARGS}
                BUILD_BYPRODUCTS
                "${ZLIB_STATIC_LIB}"
                CMAKE_ARGS
                ${ZLIB_CMAKE_ARGS})

            ExternalProject_Create_Cache(zlib_ep ${ZLIB_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/zlib_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${ZLIB_CACHE_URL})
        else()
            file(DOWNLOAD ${ZLIB_CACHE_URL} ${ZLIB_CACHE_PACKAGE_PATH} STATUS status)
            list(GET status 0 status_code)
            message(STATUS "DOWNLOADING FROM ${ZLIB_CACHE_URL} TO ${ZLIB_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
            if (status_code EQUAL 0)
                ExternalProject_Use_Cache(zlib_ep ${ZLIB_CACHE_PACKAGE_PATH} ${CMAKE_CURRENT_BINARY_DIR})
            endif()
        endif()
    else()
        externalproject_add(zlib_ep
            URL
            ${ZLIB_SOURCE_URL}
            ${EP_LOG_OPTIONS}
            BUILD_COMMAND
            ${MAKE}
            ${MAKE_BUILD_ARGS}
            BUILD_BYPRODUCTS
            "${ZLIB_STATIC_LIB}"
            CMAKE_ARGS
            ${ZLIB_CMAKE_ARGS})
    endif()

    file(MAKE_DIRECTORY "${ZLIB_INCLUDE_DIR}")
    add_library(zlib STATIC IMPORTED)
    set_target_properties(zlib
        PROPERTIES IMPORTED_LOCATION "${ZLIB_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${ZLIB_INCLUDE_DIR}")

    add_dependencies(zlib zlib_ep)
endmacro()

if(MILVUS_WITH_ZLIB)
    resolve_dependency(ZLIB)

    get_target_property(ZLIB_INCLUDE_DIR zlib INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM ${ZLIB_INCLUDE_DIR})
endif()

# ----------------------------------------------------------------------
# zstd

macro(build_zstd)
    message(STATUS "Building zstd-${ZSTD_VERSION} from source")
    set(ZSTD_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/zstd_ep-prefix/src/zstd_ep")

    set(ZSTD_CMAKE_ARGS
        ${EP_COMMON_TOOLCHAIN}
        "-DCMAKE_INSTALL_PREFIX=${ZSTD_PREFIX}"
        -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
        -DCMAKE_INSTALL_LIBDIR=lib #${CMAKE_INSTALL_LIBDIR}
        -DZSTD_BUILD_PROGRAMS=off
        -DZSTD_BUILD_SHARED=off
        -DZSTD_BUILD_STATIC=on
        -DZSTD_MULTITHREAD_SUPPORT=off)

    set(ZSTD_STATIC_LIB "${ZSTD_PREFIX}/lib/libzstd.a")
    set(ZSTD_INCLUDE_DIR "${ZSTD_PREFIX}/include")
    set(ZSTD_CMAKE_ARGS
        ${ZSTD_CMAKE_ARGS}
        -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
        -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
        -DCMAKE_C_FLAGS=${EP_C_FLAGS}
        -DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS})

    if(CMAKE_VERSION VERSION_LESS 3.7)
        message(FATAL_ERROR "Building zstd using ExternalProject requires at least CMake 3.7")
    endif()

    if(USE_JFROG_CACHE STREQUAL "ON")
        set(ZSTD_CACHE_PACKAGE_NAME "zstd_${ZSTD_MD5}.tar.gz")
        set(ZSTD_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${ZSTD_CACHE_PACKAGE_NAME}")
        set(ZSTD_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${ZSTD_CACHE_PACKAGE_NAME}")

        execute_process(COMMAND wget -q --method HEAD ${ZSTD_CACHE_URL} RESULT_VARIABLE return_code)
        message(STATUS "Check the remote file ${ZSTD_CACHE_URL}. return code = ${return_code}")
        if (NOT return_code EQUAL 0)
            externalproject_add(zstd_ep
                ${EP_LOG_OPTIONS}
                CMAKE_ARGS
                ${ZSTD_CMAKE_ARGS}
                SOURCE_SUBDIR
                "build/cmake"
                BUILD_COMMAND
                ${MAKE}
                ${MAKE_BUILD_ARGS}
                INSTALL_DIR
                ${ZSTD_PREFIX}
                URL
                ${ZSTD_SOURCE_URL}
                BUILD_BYPRODUCTS
                "${ZSTD_STATIC_LIB}")

            ExternalProject_Create_Cache(zstd_ep ${ZSTD_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/zstd_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${ZSTD_CACHE_URL})
        else()
            file(DOWNLOAD ${ZSTD_CACHE_URL} ${ZSTD_CACHE_PACKAGE_PATH} STATUS status)
            list(GET status 0 status_code)
            message(STATUS "DOWNLOADING FROM ${ZSTD_CACHE_URL} TO ${ZSTD_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
            if (status_code EQUAL 0)
                ExternalProject_Use_Cache(zstd_ep ${ZSTD_CACHE_PACKAGE_PATH} ${CMAKE_CURRENT_BINARY_DIR})
            endif()
        endif()
    else()
        externalproject_add(zstd_ep
            ${EP_LOG_OPTIONS}
            CMAKE_ARGS
            ${ZSTD_CMAKE_ARGS}
            SOURCE_SUBDIR
            "build/cmake"
            BUILD_COMMAND
            ${MAKE}
            ${MAKE_BUILD_ARGS}
            INSTALL_DIR
            ${ZSTD_PREFIX}
            URL
            ${ZSTD_SOURCE_URL}
            BUILD_BYPRODUCTS
            "${ZSTD_STATIC_LIB}")
    endif()

    file(MAKE_DIRECTORY "${ZSTD_INCLUDE_DIR}")
    add_library(zstd STATIC IMPORTED)
    set_target_properties(zstd
        PROPERTIES IMPORTED_LOCATION "${ZSTD_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${ZSTD_INCLUDE_DIR}")

    add_dependencies(zstd zstd_ep)
endmacro()

if(MILVUS_WITH_ZSTD)
    resolve_dependency(ZSTD)

    get_target_property(ZSTD_INCLUDE_DIR zstd INTERFACE_INCLUDE_DIRECTORIES)
    link_directories(SYSTEM ${ZSTD_PREFIX}/lib)
    include_directories(SYSTEM ${ZSTD_INCLUDE_DIR})
endif()

# ----------------------------------------------------------------------
# libunwind
@@ -1637,6 +1041,8 @@ macro(build_grpc)
            ${GRPC_PROTOBUF_STATIC_LIB}
            ${GRPC_PROTOC_STATIC_LIB})

        ExternalProject_Add_StepDependencies(grpc_ep build zlib_ep)

        ExternalProject_Create_Cache(grpc_ep ${GRPC_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/grpc_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${GRPC_CACHE_URL})
    else()
        file(DOWNLOAD ${GRPC_CACHE_URL} ${GRPC_CACHE_PACKAGE_PATH} STATUS status)
@@ -1665,6 +1071,9 @@ macro(build_grpc)
            ${GRPCPP_CHANNELZ_STATIC_LIB}
            ${GRPC_PROTOBUF_STATIC_LIB}
            ${GRPC_PROTOC_STATIC_LIB})

        ExternalProject_Add_StepDependencies(grpc_ep build zlib_ep)

    endif()

    file(MAKE_DIRECTORY "${GRPC_INCLUDE_DIR}")
@@ -1672,25 +1081,30 @@ macro(build_grpc)
    add_library(grpc STATIC IMPORTED)
    set_target_properties(grpc
        PROPERTIES IMPORTED_LOCATION "${GRPC_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${GRPC_INCLUDE_DIR}")
        INTERFACE_INCLUDE_DIRECTORIES "${GRPC_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES "zlib" )

    add_library(grpc++ STATIC IMPORTED)
    set_target_properties(grpc++
        PROPERTIES IMPORTED_LOCATION "${GRPC++_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${GRPC_INCLUDE_DIR}")
        INTERFACE_INCLUDE_DIRECTORIES "${GRPC_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES "zlib" )

    add_library(grpcpp_channelz STATIC IMPORTED)
    set_target_properties(grpcpp_channelz
        PROPERTIES IMPORTED_LOCATION "${GRPCPP_CHANNELZ_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${GRPC_INCLUDE_DIR}")
        INTERFACE_INCLUDE_DIRECTORIES "${GRPC_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES "zlib" )

    add_library(grpc_protobuf STATIC IMPORTED)
    set_target_properties(grpc_protobuf
        PROPERTIES IMPORTED_LOCATION "${GRPC_PROTOBUF_STATIC_LIB}")
        PROPERTIES IMPORTED_LOCATION "${GRPC_PROTOBUF_STATIC_LIB}"
        INTERFACE_LINK_LIBRARIES "zlib" )

    add_library(grpc_protoc STATIC IMPORTED)
    set_target_properties(grpc_protoc
        PROPERTIES IMPORTED_LOCATION "${GRPC_PROTOC_STATIC_LIB}")
        PROPERTIES IMPORTED_LOCATION "${GRPC_PROTOC_STATIC_LIB}"
        INTERFACE_LINK_LIBRARIES "zlib" )

    add_dependencies(grpc grpc_ep)
    add_dependencies(grpc++ grpc_ep)
@@ -1710,3 +1124,74 @@ if(MILVUS_WITH_GRPC)
    include_directories(SYSTEM ${GRPC_THIRD_PARTY_DIR}/protobuf/src)
    link_directories(SYSTEM ${GRPC_PROTOBUF_LIB_DIR})
endif()

# ----------------------------------------------------------------------
# zlib

macro(build_zlib)
    message(STATUS "Building ZLIB-${ZLIB_VERSION} from source")
    set(ZLIB_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/zlib_ep-prefix/src/zlib_ep")
    set(ZLIB_STATIC_LIB_NAME libz.a)
    set(ZLIB_STATIC_LIB "${ZLIB_PREFIX}/lib/${ZLIB_STATIC_LIB_NAME}")
    set(ZLIB_INCLUDE_DIR "${ZLIB_PREFIX}/include")
    set(ZLIB_CMAKE_ARGS ${EP_COMMON_CMAKE_ARGS} "-DCMAKE_INSTALL_PREFIX=${ZLIB_PREFIX}"
        -DBUILD_SHARED_LIBS=OFF)

    if(USE_JFROG_CACHE STREQUAL "ON")
        set(ZLIB_CACHE_PACKAGE_NAME "zlib_${ZLIB_MD5}.tar.gz")
        set(ZLIB_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${ZLIB_CACHE_PACKAGE_NAME}")
        set(ZLIB_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${ZLIB_CACHE_PACKAGE_NAME}")

        execute_process(COMMAND wget -q --method HEAD ${ZLIB_CACHE_URL} RESULT_VARIABLE return_code)
        message(STATUS "Check the remote file ${ZLIB_CACHE_URL}. return code = ${return_code}")
        if (NOT return_code EQUAL 0)
            externalproject_add(zlib_ep
                URL
                ${ZLIB_SOURCE_URL}
                ${EP_LOG_OPTIONS}
                BUILD_COMMAND
                ${MAKE}
                ${MAKE_BUILD_ARGS}
                BUILD_BYPRODUCTS
                "${ZLIB_STATIC_LIB}"
                CMAKE_ARGS
                ${ZLIB_CMAKE_ARGS})

            ExternalProject_Create_Cache(zlib_ep ${ZLIB_CACHE_PACKAGE_PATH} "${CMAKE_CURRENT_BINARY_DIR}/zlib_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${ZLIB_CACHE_URL})
        else()
            file(DOWNLOAD ${ZLIB_CACHE_URL} ${ZLIB_CACHE_PACKAGE_PATH} STATUS status)
            list(GET status 0 status_code)
            message(STATUS "DOWNLOADING FROM ${ZLIB_CACHE_URL} TO ${ZLIB_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
            if (status_code EQUAL 0)
                ExternalProject_Use_Cache(zlib_ep ${ZLIB_CACHE_PACKAGE_PATH} ${CMAKE_CURRENT_BINARY_DIR})
            endif()
        endif()
    else()
        externalproject_add(zlib_ep
            URL
            ${ZLIB_SOURCE_URL}
            ${EP_LOG_OPTIONS}
            BUILD_COMMAND
            ${MAKE}
            ${MAKE_BUILD_ARGS}
            BUILD_BYPRODUCTS
            "${ZLIB_STATIC_LIB}"
            CMAKE_ARGS
            ${ZLIB_CMAKE_ARGS})
    endif()

    file(MAKE_DIRECTORY "${ZLIB_INCLUDE_DIR}")
    add_library(zlib STATIC IMPORTED)
    set_target_properties(zlib
        PROPERTIES IMPORTED_LOCATION "${ZLIB_STATIC_LIB}"
        INTERFACE_INCLUDE_DIRECTORIES "${ZLIB_INCLUDE_DIR}")

    add_dependencies(zlib zlib_ep)
endmacro()

if(MILVUS_WITH_ZLIB)
    resolve_dependency(ZLIB)

    get_target_property(ZLIB_INCLUDE_DIR zlib INTERFACE_INCLUDE_DIRECTORIES)
    include_directories(SYSTEM ${ZLIB_INCLUDE_DIR})
endif()
@@ -99,6 +99,7 @@ for test in `ls ${DIR_UNITTEST}`; do
    if [ $? -ne 0 ]; then
        echo ${args}
        echo ${DIR_UNITTEST}/${test} "run failed"
        exit -1
    fi
done

@@ -121,9 +122,13 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
    "*/src/server/Server.cpp" \
    "*/src/server/DBWrapper.cpp" \
    "*/src/server/grpc_impl/GrpcServer.cpp" \
    "*/src/external/easyloggingpp/easylogging++.h" \
    "*/src/external/easyloggingpp/easylogging++.cc" \
    "*/easylogging++.h" \
    "*/easylogging++.cc" \
    "*/src/external/*"

if [ $? -ne 0 ]; then
    echo "generate ${FILE_INFO_OUTPUT_NEW} failed"
    exit -2
fi
# gen html report
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/
@@ -120,14 +120,10 @@ set(third_party_libs
        ${client_grpc_lib}
        yaml-cpp
        ${prometheus_lib}
        ${boost_lib}
        bzip2
        lz4
        snappy
        zlib
        zstd
        ${cuda_lib}
        mysqlpp
        zlib
        ${boost_lib}
        )

if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
@@ -299,12 +299,29 @@ macro(build_arrow)
        ${EP_COMMON_CMAKE_ARGS}
        -DARROW_BUILD_STATIC=ON
        -DARROW_BUILD_SHARED=OFF
        -DARROW_PARQUET=OFF
        -DARROW_USE_GLOG=OFF
        -DCMAKE_INSTALL_PREFIX=${ARROW_PREFIX}
        "-DCMAKE_LIBRARY_PATH=${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs"
        -DARROW_CUDA=OFF
        -DARROW_FLIGHT=OFF
        -DARROW_GANDIVA=OFF
        -DARROW_GANDIVA_JAVA=OFF
        -DARROW_HDFS=OFF
        -DARROW_HIVESERVER2=OFF
        -DARROW_ORC=OFF
        -DARROW_PARQUET=OFF
        -DARROW_PLASMA=OFF
        -DARROW_PLASMA_JAVA_CLIENT=OFF
        -DARROW_PYTHON=OFF
        -DARROW_WITH_BZ2=OFF
        -DARROW_WITH_ZLIB=OFF
        -DARROW_WITH_LZ4=OFF
        -DARROW_WITH_SNAPPY=OFF
        -DARROW_WITH_ZSTD=OFF
        -DARROW_WITH_BROTLI=OFF
        -DCMAKE_BUILD_TYPE=Release
        -DARROW_DEPENDENCY_SOURCE=BUNDLED) #Build all arrow dependencies from source instead of calling find_package first
        -DARROW_DEPENDENCY_SOURCE=BUNDLED #Build all arrow dependencies from source instead of calling find_package first
        -DBOOST_SOURCE=AUTO #try to find BOOST in the system default locations and build from source if not found
    )


    if(USE_JFROG_CACHE STREQUAL "ON")
@@ -22,10 +22,10 @@
#include <cstring>
#include <string>

#include "../version.h"
#include "external/easyloggingpp/easylogging++.h"
#include "metrics/Metrics.h"
#include "server/Server.h"
#include "src/version.h"
#include "utils/CommonUtil.h"
#include "utils/SignalUtil.h"
@@ -49,6 +49,15 @@ JobMgr::Stop() {
    }
}

json
JobMgr::Dump() const {
    json ret{
        {"running", running_},
        {"event_queue_length", queue_.size()},
    };
    return ret;
}

void
JobMgr::Put(const JobPtr& job) {
    {
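The new JobMgr::Dump() above follows the pattern \#64 introduces throughout the scheduler: build a json object and let callers serialize it. A minimal, self-contained sketch of how such a snapshot might be consumed, assuming the scheduler's json alias wraps nlohmann::json (an assumption) and using an invented JobMgrLike stand-in:

#include <cstddef>
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

struct JobMgrLike {  // invented stand-in for scheduler::JobMgr
    bool running_ = true;
    std::size_t queue_length_ = 3;
    json Dump() const {
        return json{{"running", running_}, {"event_queue_length", queue_length_}};
    }
};

int main() {
    JobMgrLike mgr;
    // json::dump() renders the object as a string, suitable for a log sink,
    // e.g. {"event_queue_length":3,"running":true}
    std::cout << mgr.Dump().dump() << std::endl;
    return 0;
}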
@@ -28,13 +28,14 @@
#include <vector>

#include "ResourceMgr.h"
#include "interface/interfaces.h"
#include "job/Job.h"
#include "task/Task.h"

namespace milvus {
namespace scheduler {

class JobMgr {
class JobMgr : public interface::dumpable {
 public:
    explicit JobMgr(ResourceMgrPtr res_mgr);

@@ -44,6 +45,9 @@ class JobMgr {
    void
    Stop();

    json
    Dump() const override;

 public:
    void
    Put(const JobPtr& job);
@@ -170,16 +170,20 @@ ResourceMgr::GetNumGpuResource() const {
    return num;
}

std::string
ResourceMgr::Dump() {
    std::stringstream ss;
    ss << "ResourceMgr contains " << resources_.size() << " resources." << std::endl;

json
ResourceMgr::Dump() const {
    json resources{};
    for (auto& res : resources_) {
        ss << res->Dump();
        resources.push_back(res->Dump());
    }

    return ss.str();
    json ret{
        {"number_of_resource", resources_.size()},
        {"number_of_disk_resource", disk_resources_.size()},
        {"number_of_cpu_resource", cpu_resources_.size()},
        {"number_of_gpu_resource", gpu_resources_.size()},
        {"resources", resources},
    };
    return ret;
}

std::string
@@ -187,9 +191,9 @@ ResourceMgr::DumpTaskTables() {
    std::stringstream ss;
    ss << ">>>>>>>>>>>>>>>ResourceMgr::DumpTaskTable<<<<<<<<<<<<<<<" << std::endl;
    for (auto& resource : resources_) {
        ss << resource->Dump() << std::endl;
        ss << resource->task_table().Dump();
        ss << resource->Dump() << std::endl << std::endl;
        ss << resource->name() << std::endl;
        ss << resource->task_table().Dump().dump();
        ss << resource->name() << std::endl << std::endl;
    }
    return ss.str();
}
@@ -25,13 +25,14 @@
#include <utility>
#include <vector>

#include "interface/interfaces.h"
#include "resource/Resource.h"
#include "utils/Log.h"

namespace milvus {
namespace scheduler {

class ResourceMgr {
class ResourceMgr : public interface::dumpable {
 public:
    ResourceMgr() = default;

@@ -103,8 +104,8 @@ class ResourceMgr {

 public:
    /******** Utility Functions ********/
    std::string
    Dump();
    json
    Dump() const override;

    std::string
    DumpTaskTables();
@@ -66,9 +66,13 @@ Scheduler::PostEvent(const EventPtr& event) {
    event_cv_.notify_one();
}

std::string
Scheduler::Dump() {
    return std::string();
json
Scheduler::Dump() const {
    json ret{
        {"running", running_},
        {"event_queue_length", event_queue_.size()},
    };
    return ret;
}

void
@@ -25,14 +25,14 @@
#include <unordered_map>

#include "ResourceMgr.h"
#include "interface/interfaces.h"
#include "resource/Resource.h"
#include "utils/Log.h"

namespace milvus {
namespace scheduler {

// TODO(wxyu): refactor, not friendly to unittest, logical in framework code
class Scheduler {
class Scheduler : public interface::dumpable {
 public:
    explicit Scheduler(ResourceMgrWPtr res_mgr);

@@ -57,11 +57,8 @@ class Scheduler {
    void
    PostEvent(const EventPtr& event);

    /*
     * Dump as string;
     */
    std::string
    Dump();
    json
    Dump() const override;

 private:
    /******** Events ********/
@@ -53,7 +53,7 @@ ToString(TaskTableItemState state) {
}

json
TaskTimestamp::Dump() {
TaskTimestamp::Dump() const {
    json ret{
        {"start", start}, {"load", load}, {"loaded", loaded}, {"execute", execute},
        {"executed", executed}, {"move", move}, {"moved", moved}, {"finish", finish},
@@ -141,7 +141,7 @@ TaskTableItem::Moved() {
}

json
TaskTableItem::Dump() {
TaskTableItem::Dump() const {
    json ret{
        {"id", id},
        {"task", (int64_t)task.get()},
@@ -263,7 +263,7 @@ TaskTable::Get(uint64_t index) {
//}

json
TaskTable::Dump() {
TaskTable::Dump() const {
    json ret;
    for (auto& item : table_) {
        ret.push_back(item->Dump());

@@ -54,7 +54,7 @@ struct TaskTimestamp : public interface::dumpable {
    uint64_t finish = 0;

    json
    Dump() override;
    Dump() const override;
};

struct TaskTableItem : public interface::dumpable {
@@ -92,7 +92,7 @@ struct TaskTableItem : public interface::dumpable {
    Moved();

    json
    Dump() override;
    Dump() const override;
};

using TaskTableItemPtr = std::shared_ptr<TaskTableItem>;
@@ -245,7 +245,7 @@ class TaskTable : public interface::dumpable {
     * Dump;
     */
    json
    Dump() override;
    Dump() const override;

 private:
    std::uint64_t id_ = 0;
@@ -37,7 +37,7 @@ struct dumpable {
    }

    virtual json
    Dump() = 0;
    Dump() const = 0;
};

}  // namespace interface
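This const-qualification of dumpable::Dump() is what drives the Dump() const override signature changes in every hunk below. A minimal sketch of the contract, assuming the scheduler's json alias is nlohmann::json and using an invented implementing class (real implementors in this commit include JobMgr, Scheduler, ResourceMgr, and Node):

#include <nlohmann/json.hpp>

using json = nlohmann::json;

namespace interface {
struct dumpable {
    virtual ~dumpable() = default;
    virtual json
    Dump() const = 0;  // const-qualified by this commit
};
}  // namespace interface

// QueueStats is an invented example, not part of the Milvus source.
class QueueStats : public interface::dumpable {
 public:
    json
    Dump() const override {
        return json{{"depth", depth_}, {"capacity", capacity_}};
    }

 private:
    int depth_ = 2;
    int capacity_ = 64;
};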
@@ -54,5 +54,13 @@ BuildIndexJob::BuildIndexDone(size_t to_index_id) {
    SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " finish index file: " << to_index_id;
}

json
BuildIndexJob::Dump() const {
    json ret{
        {"number_of_to_index_file", to_index_files_.size()},
    };
    return ret;
}

}  // namespace scheduler
}  // namespace milvus

@@ -53,6 +53,9 @@ class BuildIndexJob : public Job {
    void
    BuildIndexDone(size_t to_index_id);

    json
    Dump() const override;

 public:
    Status&
    GetStatus() {
@@ -45,5 +45,15 @@ DeleteJob::ResourceDone() {
    cv_.notify_one();
}

json
DeleteJob::Dump() const {
    json ret{
        {"table_id", table_id_},
        {"number_of_resource", num_resource_},
        {"number_of_done", done_resource},
    };
    return ret;
}

}  // namespace scheduler
}  // namespace milvus

@@ -44,6 +44,9 @@ class DeleteJob : public Job {
    void
    ResourceDone();

    json
    Dump() const override;

 public:
    std::string
    table_id() const {
@@ -27,6 +27,8 @@
#include <unordered_map>
#include <vector>

#include "scheduler/interface/interfaces.h"

namespace milvus {
namespace scheduler {

@@ -39,7 +41,7 @@ enum class JobType {

using JobId = std::uint64_t;

class Job {
class Job : public interface::dumpable {
 public:
    inline JobId
    id() const {
@@ -63,5 +63,15 @@ SearchJob::GetStatus() {
    return status_;
}

json
SearchJob::Dump() const {
    json ret{
        {"topk", topk_},
        {"nq", nq_},
        {"nprobe", nprobe_},
    };
    return ret;
}

}  // namespace scheduler
}  // namespace milvus

@@ -61,6 +61,9 @@ class SearchJob : public Job {
    Status&
    GetStatus();

    json
    Dump() const override;

 public:
    uint64_t
    topk() const {
@@ -24,7 +24,7 @@ namespace scheduler {

std::ostream&
operator<<(std::ostream& out, const CpuResource& resource) {
    out << resource.Dump();
    out << resource.Dump().dump();
    return out;
}

@@ -28,11 +28,6 @@ class CpuResource : public Resource {
 public:
    explicit CpuResource(std::string name, uint64_t device_id, bool enable_loader, bool enable_executor);

    inline std::string
    Dump() const override {
        return "<CpuResource, name=" + name_ + ">";
    }

    friend std::ostream&
    operator<<(std::ostream& out, const CpuResource& resource);
@@ -28,11 +28,6 @@ class DiskResource : public Resource {
 public:
    explicit DiskResource(std::string name, uint64_t device_id, bool enable_loader, bool enable_executor);

    inline std::string
    Dump() const override {
        return "<DiskResource, name=" + name_ + ">";
    }

    friend std::ostream&
    operator<<(std::ostream& out, const DiskResource& resource);
@@ -22,7 +22,7 @@ namespace scheduler {

std::ostream&
operator<<(std::ostream& out, const GpuResource& resource) {
    out << resource.Dump();
    out << resource.Dump().dump();
    return out;
}

@@ -29,11 +29,6 @@ class GpuResource : public Resource {
 public:
    explicit GpuResource(std::string name, uint64_t device_id, bool enable_loader, bool enable_executor);

    inline std::string
    Dump() const override {
        return "<GpuResource, name=" + name_ + ">";
    }

    friend std::ostream&
    operator<<(std::ostream& out, const GpuResource& resource);
@@ -38,15 +38,21 @@ Node::GetNeighbours() {
    return ret;
}

std::string
Node::Dump() {
    std::stringstream ss;
    ss << "<Node, id=" << std::to_string(id_) << ">::neighbours:" << std::endl;
json
Node::Dump() const {
    json neighbours;
    for (auto& neighbour : neighbours_) {
        ss << "\t<Neighbour, id=" << std::to_string(neighbour.first);
        ss << ", connection: " << neighbour.second.connection.Dump() << ">" << std::endl;
        json n;
        n["id"] = neighbour.first;
        n["connection"] = neighbour.second.connection.Dump();
        neighbours.push_back(n);
    }

    return ss.str();

    json ret{
        {"id", id_},
        {"neighbours", neighbours},
    };
    return ret;
}

void
@@ -24,6 +24,7 @@

#include "Connection.h"
#include "scheduler/TaskTable.h"
#include "scheduler/interface/interfaces.h"

namespace milvus {
namespace scheduler {
@@ -41,7 +42,7 @@ struct Neighbour {
};

// TODO(lxj): return type void -> Status
class Node {
class Node : public interface::dumpable {
 public:
    Node();

@@ -52,8 +53,8 @@ class Node {
    GetNeighbours();

 public:
    std::string
    Dump();
    json
    Dump() const override;

 private:
    std::mutex mutex_;
@@ -32,6 +32,22 @@ operator<<(std::ostream& out, const Resource& resource) {
    return out;
}

std::string
ToString(ResourceType type) {
    switch (type) {
        case ResourceType::DISK: {
            return "DISK";
        }
        case ResourceType::CPU: {
            return "CPU";
        }
        case ResourceType::GPU: {
            return "GPU";
        }
        default: { return "UNKNOWN"; }
    }
}

Resource::Resource(std::string name, ResourceType type, uint64_t device_id, bool enable_loader, bool enable_executor)
    : name_(std::move(name)),
      type_(type),
@@ -89,6 +105,22 @@ Resource::WakeupExecutor() {
    exec_cv_.notify_one();
}

json
Resource::Dump() const {
    json ret{
        {"device_id", device_id_},
        {"name", name_},
        {"type", ToString(type_)},
        {"task_average_cost", TaskAvgCost()},
        {"task_total_cost", total_cost_},
        {"total_tasks", total_task_},
        {"running", running_},
        {"enable_loader", enable_loader_},
        {"enable_executor", enable_executor_},
    };
    return ret;
}

uint64_t
Resource::NumOfTaskToExec() {
    uint64_t count = 0;
@@ -77,10 +77,8 @@ class Resource : public Node, public std::enable_shared_from_this<Resource> {
        subscriber_ = std::move(subscriber);
    }

    inline virtual std::string
    Dump() const {
        return "<Resource>";
    }
    json
    Dump() const override;

 public:
    inline std::string
@@ -121,6 +119,9 @@ class Resource : public Node, public std::enable_shared_from_this<Resource> {
    // TODO(wxyu): need double ?
    inline uint64_t
    TaskAvgCost() const {
        if (total_task_ == 0) {
            return 0;
        }
        return total_cost_ / total_task_;
    }
@ -29,11 +29,6 @@ class TestResource : public Resource {
 public:
    explicit TestResource(std::string name, uint64_t device_id, bool enable_loader, bool enable_executor);

    inline std::string
    Dump() const override {
        return "<TestResource, name=" + name_ + ">";
    }

    friend std::ostream&
    operator<<(std::ostream& out, const TestResource& resource);

@ -30,9 +30,6 @@ add_library(milvus_sdk STATIC

target_link_libraries(milvus_sdk
    ${client_grpc_lib}
    bzip2
    lz4
    snappy
    zlib
)

@ -16,8 +16,8 @@
// under the License.

#include "sdk/grpc/ClientProxy.h"
#include "../../../version.h"
#include "grpc/gen-milvus/milvus.grpc.pb.h"
#include "src/version.h"

#include <memory>
#include <string>

@ -25,6 +25,7 @@
#include "server/DBWrapper.h"
#include "server/Server.h"
#include "server/grpc_impl/GrpcServer.h"
#include "src/version.h"
#include "utils/Log.h"
#include "utils/LogUtil.h"
#include "utils/SignalUtil.h"
@ -180,6 +181,9 @@ Server::Start() {

    InitLog(log_config_file_);

    // print version information
    SERVER_LOG_INFO << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;

    server::Metrics::GetInstance().Init();
    server::SystemInfo::GetInstance().Init();

@ -23,12 +23,12 @@
#include <vector>
//#include <gperftools/profiler.h>

#include "../../../version.h"
#include "GrpcServer.h"
#include "db/Utils.h"
#include "scheduler/SchedInst.h"
#include "server/DBWrapper.h"
#include "server/Server.h"
#include "src/version.h"
#include "utils/CommonUtil.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
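
(The new startup log line stitches together the `BUILD_TYPE`, `MILVUS_VERSION`, and `BUILD_TIME` macros from src/version.h; with purely illustrative values it would emit a line like the following:)

// Example output only -- the actual values are generated at build time:
// Milvus GPU version: v0.5.1, built at 2019-10-21 10:00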
7
core/thirdparty/versions.txt
vendored
@ -1,18 +1,13 @@
BOOST_VERSION=1.70.0
BZIP2_VERSION=1.0.6
EASYLOGGINGPP_VERSION=v9.96.7
GTEST_VERSION=1.8.1
LZ4_VERSION=v1.9.1
MYSQLPP_VERSION=3.2.4
PROMETHEUS_VERSION=v0.7.0
SNAPPY_VERSION=1.1.7
SQLITE_VERSION=3280000
SQLITE_ORM_VERSION=master
YAMLCPP_VERSION=0.6.2
ZLIB_VERSION=v1.2.11
ZSTD_VERSION=v1.4.0
LIBUNWIND_VERSION=1.3.1
GPERFTOOLS_VERSION=2.7
GRPC_VERSION=master
ZLIB_VERSION=v1.2.11

# vim: set filetype=sh:
@ -1,5 +1,6 @@
#!/bin/bash

sudo apt-get install -y gfortran libmysqlclient-dev mysql-client libcurl4-openssl-dev libboost-system-dev libboost-filesystem-dev libboost-serialization-dev
sudo apt-get install -y gfortran libmysqlclient-dev mysql-client libcurl4-openssl-dev libboost-system-dev \
    libboost-filesystem-dev libboost-serialization-dev libboost-regex-dev

sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so

@ -102,7 +102,6 @@ set(unittest_libs
    sqlite
    libboost_system.a
    libboost_filesystem.a
    lz4
    mysqlpp
    yaml-cpp
    gtest

@ -31,12 +31,5 @@ target_link_libraries(test_db

install(TARGETS test_db DESTINATION unittest)

configure_file(appendix/server_config.yaml
    "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/server_config.yaml"
    COPYONLY)

configure_file(appendix/log_config.conf
    "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/log_config.conf"
    COPYONLY)
@ -1,27 +0,0 @@
* GLOBAL:
    FORMAT = "%datetime | %level | %logger | %msg"
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-global.log"
    ENABLED = true
    TO_FILE = true
    TO_STANDARD_OUTPUT = false
    SUBSECOND_PRECISION = 3
    PERFORMANCE_TRACKING = false
    MAX_LOG_FILE_SIZE = 209715200 ## Throw log files away after 200MB
* DEBUG:
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-debug.log"
    ENABLED = true
* WARNING:
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-warning.log"
* TRACE:
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-trace.log"
* VERBOSE:
    FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
    TO_FILE = false
    TO_STANDARD_OUTPUT = false
## Error logs
* ERROR:
    ENABLED = true
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-error.log"
* FATAL:
    ENABLED = true
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
@ -1,37 +0,0 @@
# All the following configurations are default values.

server_config:
  address: 0.0.0.0          # milvus server ip address (IPv4)
  port: 19530               # port range: 1025 ~ 65534
  deploy_mode: single       # deployment type: single, cluster_readonly, cluster_writable
  time_zone: UTC+8

db_config:
  primary_path: /tmp/milvus # path used to store data and meta
  secondary_path:           # path used to store data only, split by semicolon

  backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
                             # Keep 'dialect://:@:/', and replace other texts with real values.
                             # Replace 'dialect' with 'mysql' or 'sqlite'

  insert_buffer_size: 4     # GB, maximum insert buffer size allowed
  build_index_gpu: 0        # gpu id used for building index

metric_config:
  enable_monitor: false     # enable monitoring or not
  collector: prometheus     # prometheus
  prometheus_config:
    port: 8080              # port prometheus used to fetch metrics

cache_config:
  cpu_mem_capacity: 16      # GB, CPU memory used for cache
  cpu_mem_threshold: 0.85   # percentage of data kept when cache cleanup triggered
  cache_insert_data: false  # whether load inserted data into cache

engine_config:
  blas_threshold: 20

resource_config:
  resource_pool:
    - cpu
    - gpu0
@ -33,8 +33,6 @@

namespace {

static const char *CONFIG_FILE_PATH = "./milvus/conf/server_config.yaml";

static const char *TABLE_NAME = "test_group";
static constexpr int64_t TABLE_DIM = 256;
static constexpr int64_t VECTOR_COUNT = 25000;
@ -232,8 +230,10 @@ TEST_F(DBTest, DB_TEST) {
}

TEST_F(DBTest, SEARCH_TEST) {
    std::string config_path(CONFIG_PATH);
    config_path += CONFIG_FILE;
    milvus::server::Config &config = milvus::server::Config::GetInstance();
    milvus::Status s = config.LoadConfigFile(CONFIG_FILE_PATH);
    milvus::Status s = config.LoadConfigFile(config_path);

    milvus::engine::meta::TableSchema table_info = BuildTableSchema();
    auto stat = db_->CreateTable(table_info);
@ -28,11 +28,59 @@
#include "db/DBFactory.h"
#include "db/Options.h"
#include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
#include "utils/CommonUtil.h"

INITIALIZE_EASYLOGGINGPP

namespace {

static const char
    * CONFIG_STR = "# All the following configurations are default values.\n"
                   "\n"
                   "server_config:\n"
                   " address: 0.0.0.0 # milvus server ip address (IPv4)\n"
                   " port: 19530 # port range: 1025 ~ 65534\n"
                   " deploy_mode: single \n"
                   " time_zone: UTC+8\n"
                   "\n"
                   "db_config:\n"
                   " primary_path: /tmp/milvus # path used to store data and meta\n"
                   " secondary_path: # path used to store data only, split by semicolon\n"
                   "\n"
                   " backend_url: sqlite://:@:/ \n"
                   " \n"
                   " # Replace 'dialect' with 'mysql' or 'sqlite'\n"
                   "\n"
                   " insert_buffer_size: 4 # GB, maximum insert buffer size allowed\n"
                   "\n"
                   "metric_config:\n"
                   " enable_monitor: false # enable monitoring or not\n"
                   " collector: prometheus # prometheus\n"
                   " prometheus_config:\n"
                   " port: 8080 # port prometheus used to fetch metrics\n"
                   "\n"
                   "cache_config:\n"
                   " cpu_mem_capacity: 16 # GB, CPU memory used for cache\n"
                   " cpu_mem_threshold: 0.85 # percentage of data kept when cache cleanup triggered\n"
                   " cache_insert_data: false # whether load inserted data into cache\n"
                   "\n"
                   "engine_config:\n"
                   " blas_threshold: 20\n"
                   "\n"
                   "resource_config:\n"
                   " resource_pool:\n"
                   " - gpu0\n"
                   " index_build_device: gpu0 # GPU used for building index";

void
WriteToFile(const std::string& file_path, const char* content) {
    std::fstream fs(file_path.c_str(), std::ios_base::out);

    //write data to file
    fs << content;
    fs.close();
}

class DBTestEnvironment : public ::testing::Environment {
 public:
    explicit DBTestEnvironment(const std::string& uri)
@ -84,7 +132,7 @@ BaseTest::TearDown() {
milvus::engine::DBOptions
BaseTest::GetOptions() {
    auto options = milvus::engine::DBFactory::BuildOption();
    options.meta_.path_ = "/tmp/milvus_test";
    options.meta_.path_ = CONFIG_PATH;
    options.meta_.backend_uri_ = "sqlite://:@:/";
    return options;
}
@ -111,6 +159,9 @@ DBTest::SetUp() {

    auto options = GetOptions();
    db_ = milvus::engine::DBFactory::Build(options);

    std::string config_path(options.meta_.path_ + CONFIG_FILE);
    WriteToFile(config_path, CONFIG_STR);
}

void
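
(Taken together, the unit-test bootstrap now materializes its own config file instead of relying on the removed appendix/ files. A minimal sketch of the resulting flow; `CONFIG_PATH` and `CONFIG_FILE` are the constants added to the test header below, and error handling is elided:)

// Each test run writes the embedded YAML, then points the Config singleton at it:
std::string config_path = std::string(CONFIG_PATH) + CONFIG_FILE;  // "/tmp/milvus_test/server_config.yaml"
WriteToFile(config_path, CONFIG_STR);
auto& config = milvus::server::Config::GetInstance();
milvus::Status s = config.LoadConfigFile(config_path);  // tests then assert s.ok()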
@ -42,6 +42,9 @@
#define STOP_TIMER(name)
#endif

static const char *CONFIG_PATH = "/tmp/milvus_test";
static const char *CONFIG_FILE = "/server_config.yaml";

class BaseTest : public ::testing::Test {
 protected:
    void InitLog();

@ -59,9 +59,6 @@ set(client_grpc_lib
target_link_libraries(test_server
    knowhere
    stdc++
    snappy
    bz2
    zstd
    ${client_grpc_lib}
    ${unittest_libs}
)
@ -33,13 +33,13 @@ static constexpr uint64_t GB = MB * 1024;
} // namespace

TEST_F(ConfigTest, CONFIG_TEST) {
    milvus::server::ConfigMgr *config_mgr = milvus::server::YamlConfigMgr::GetInstance();
    milvus::server::ConfigMgr* config_mgr = milvus::server::YamlConfigMgr::GetInstance();

    milvus::Status s = config_mgr->LoadConfigFile("");
    ASSERT_FALSE(s.ok());

    std::string config_path(CONFIG_PATH);
    s = config_mgr->LoadConfigFile(config_path+ INVALID_CONFIG_FILE);
    s = config_mgr->LoadConfigFile(config_path + INVALID_CONFIG_FILE);
    ASSERT_FALSE(s.ok());

    s = config_mgr->LoadConfigFile(config_path + VALID_CONFIG_FILE);
@ -47,11 +47,11 @@ TEST_F(ConfigTest, CONFIG_TEST) {

    config_mgr->Print();

    milvus::server::ConfigNode &root_config = config_mgr->GetRootNode();
    milvus::server::ConfigNode &server_config = root_config.GetChild("server_config");
    milvus::server::ConfigNode &db_config = root_config.GetChild("db_config");
    milvus::server::ConfigNode &metric_config = root_config.GetChild("metric_config");
    milvus::server::ConfigNode &cache_config = root_config.GetChild("cache_config");
    milvus::server::ConfigNode& root_config = config_mgr->GetRootNode();
    milvus::server::ConfigNode& server_config = root_config.GetChild("server_config");
    milvus::server::ConfigNode& db_config = root_config.GetChild("db_config");
    milvus::server::ConfigNode& metric_config = root_config.GetChild("metric_config");
    milvus::server::ConfigNode& cache_config = root_config.GetChild("cache_config");
    milvus::server::ConfigNode invalid_config = root_config.GetChild("invalid_config");
    auto valus = invalid_config.GetSequence("not_exist");
    float ff = invalid_config.GetFloatValue("not_exist", 3.0);
@ -100,7 +100,7 @@ TEST_F(ConfigTest, CONFIG_TEST) {

TEST_F(ConfigTest, SERVER_CONFIG_TEST) {
    std::string config_path(CONFIG_PATH);
    milvus::server::Config &config = milvus::server::Config::GetInstance();
    milvus::server::Config& config = milvus::server::Config::GetInstance();
    milvus::Status s = config.LoadConfigFile(config_path + VALID_CONFIG_FILE);
    ASSERT_TRUE(s.ok());
@ -23,7 +23,7 @@
#include "server/grpc_impl/GrpcRequestHandler.h"
#include "server/grpc_impl/GrpcRequestScheduler.h"
#include "server/grpc_impl/GrpcRequestTask.h"
#include "../version.h"
#include "src/version.h"

#include "grpc/gen-milvus/milvus.grpc.pb.h"
#include "grpc/gen-status/status.pb.h"
@ -36,7 +36,7 @@

namespace {

static const char *TABLE_NAME = "test_grpc";
static const char* TABLE_NAME = "test_grpc";
static constexpr int64_t TABLE_DIM = 256;
static constexpr int64_t INDEX_FILE_SIZE = 1024;
static constexpr int64_t VECTOR_COUNT = 1000;
@ -109,7 +109,7 @@ class RpcHandlerTest : public testing::Test {

void
BuildVectors(int64_t from, int64_t to,
             std::vector<std::vector<float >> &vector_record_array) {
             std::vector<std::vector<float >>& vector_record_array) {
    if (to <= from) {
        return;
    }
@ -119,7 +119,7 @@ BuildVectors(int64_t from, int64_t to,
        std::vector<float> record;
        record.resize(TABLE_DIM);
        for (int64_t i = 0; i < TABLE_DIM; i++) {
            record[i] = (float) (k % (i + 1));
            record[i] = (float)(k % (i + 1));
        }

        vector_record_array.emplace_back(record);
@ -136,7 +136,7 @@ CurrentTmDate(int64_t offset_day = 0) {
    gmtime_r(&tt, &t);

    std::string str = std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1)
        + "-" + std::to_string(t.tm_mday);
                      + "-" + std::to_string(t.tm_mday);

    return str;
}
@ -200,8 +200,8 @@ TEST_F(RpcHandlerTest, INSERT_TEST) {
    std::vector<std::vector<float>> record_array;
    BuildVectors(0, VECTOR_COUNT, record_array);
    ::milvus::grpc::VectorIds vector_ids;
    for (auto &record : record_array) {
        ::milvus::grpc::RowRecord *grpc_record = request.add_row_record_array();
    for (auto& record : record_array) {
        ::milvus::grpc::RowRecord* grpc_record = request.add_row_record_array();
        for (size_t i = 0; i < record.size(); i++) {
            grpc_record->add_vector_data(record[i]);
        }
@ -239,8 +239,8 @@ TEST_F(RpcHandlerTest, SEARCH_TEST) {
    std::vector<std::vector<float>> record_array;
    BuildVectors(0, VECTOR_COUNT, record_array);
    ::milvus::grpc::InsertParam insert_param;
    for (auto &record : record_array) {
        ::milvus::grpc::RowRecord *grpc_record = insert_param.add_row_record_array();
    for (auto& record : record_array) {
        ::milvus::grpc::RowRecord* grpc_record = insert_param.add_row_record_array();
        for (size_t i = 0; i < record.size(); i++) {
            grpc_record->add_vector_data(record[i]);
        }
@ -252,16 +252,16 @@ TEST_F(RpcHandlerTest, SEARCH_TEST) {
    sleep(7);

    BuildVectors(0, 10, record_array);
    for (auto &record : record_array) {
        ::milvus::grpc::RowRecord *row_record = request.add_query_record_array();
        for (auto &rec : record) {
    for (auto& record : record_array) {
        ::milvus::grpc::RowRecord* row_record = request.add_query_record_array();
        for (auto& rec : record) {
            row_record->add_vector_data(rec);
        }
    }
    handler->Search(&context, &request, &response);

    //test search with range
    ::milvus::grpc::Range *range = request.mutable_query_range_array()->Add();
    ::milvus::grpc::Range* range = request.mutable_query_range_array()->Add();
    range->set_start_value(CurrentTmDate(-2));
    range->set_end_value(CurrentTmDate(-3));
    handler->Search(&context, &request, &response);
@ -273,7 +273,7 @@ TEST_F(RpcHandlerTest, SEARCH_TEST) {
    handler->Search(&context, &request, &response);

    ::milvus::grpc::SearchInFilesParam search_in_files_param;
    std::string *file_id = search_in_files_param.add_file_id_array();
    std::string* file_id = search_in_files_param.add_file_id_array();
    *file_id = "test_tbl";
    handler->SearchInFiles(&context, &search_in_files_param, &response);
}
@ -323,8 +323,8 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
    //test empty row record
    handler->Insert(&context, &request, &vector_ids);

    for (auto &record : record_array) {
        ::milvus::grpc::RowRecord *grpc_record = request.add_row_record_array();
    for (auto& record : record_array) {
        ::milvus::grpc::RowRecord* grpc_record = request.add_row_record_array();
        for (size_t i = 0; i < record.size(); i++) {
            grpc_record->add_vector_data(record[i]);
        }
@ -341,7 +341,7 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
    request.clear_row_record_array();
    vector_ids.clear_vector_id_array();
    for (uint64_t i = 0; i < 10; ++i) {
        ::milvus::grpc::RowRecord *grpc_record = request.add_row_record_array();
        ::milvus::grpc::RowRecord* grpc_record = request.add_row_record_array();
        for (size_t j = 0; j < 10; j++) {
            grpc_record->add_vector_data(record_array[i][j]);
        }
@ -431,12 +431,12 @@ class DummyTask : public milvus::server::grpc::GrpcBaseTask {
    }

    static milvus::server::grpc::BaseTaskPtr
    Create(std::string &dummy) {
    Create(std::string& dummy) {
        return std::shared_ptr<milvus::server::grpc::GrpcBaseTask>(new DummyTask(dummy));
    }

 public:
    explicit DummyTask(std::string &dummy) : GrpcBaseTask(dummy) {
    explicit DummyTask(std::string& dummy) : GrpcBaseTask(dummy) {
    }
};
@ -41,11 +41,3 @@ target_link_libraries(test_wrapper
    ${unittest_libs})

install(TARGETS test_wrapper DESTINATION unittest)

configure_file(appendix/server_config.yaml
    "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/server_config.yaml"
    COPYONLY)

configure_file(appendix/log_config.conf
    "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/log_config.conf"
    COPYONLY)
@ -1,27 +0,0 @@
* GLOBAL:
    FORMAT = "%datetime | %level | %logger | %msg"
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-global.log"
    ENABLED = true
    TO_FILE = true
    TO_STANDARD_OUTPUT = false
    SUBSECOND_PRECISION = 3
    PERFORMANCE_TRACKING = false
    MAX_LOG_FILE_SIZE = 209715200 ## Throw log files away after 200MB
* DEBUG:
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-debug.log"
    ENABLED = true
* WARNING:
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-warning.log"
* TRACE:
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-trace.log"
* VERBOSE:
    FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
    TO_FILE = false
    TO_STANDARD_OUTPUT = false
## Error logs
* ERROR:
    ENABLED = true
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-error.log"
* FATAL:
    ENABLED = true
    FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
@ -1,37 +0,0 @@
# All the following configurations are default values.

server_config:
  address: 0.0.0.0          # milvus server ip address (IPv4)
  port: 19530               # port range: 1025 ~ 65534
  deploy_mode: single       # deployment type: single, cluster_readonly, cluster_writable
  time_zone: UTC+8

db_config:
  primary_path: /tmp/milvus # path used to store data and meta
  secondary_path:           # path used to store data only, split by semicolon

  backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
                             # Keep 'dialect://:@:/', and replace other texts with real values.
                             # Replace 'dialect' with 'mysql' or 'sqlite'

  insert_buffer_size: 4     # GB, maximum insert buffer size allowed
  build_index_gpu: 0        # gpu id used for building index

metric_config:
  enable_monitor: false     # enable monitoring or not
  collector: prometheus     # prometheus
  prometheus_config:
    port: 8080              # port prometheus used to fetch metrics

cache_config:
  cpu_mem_capacity: 16      # GB, CPU memory used for cache
  cpu_mem_threshold: 0.85   # percentage of data kept when cache cleanup triggered
  cache_insert_data: false  # whether load inserted data into cache

engine_config:
  blas_threshold: 20

resource_config:
  resource_pool:
    - cpu
    - gpu0
@ -16,20 +16,16 @@
// under the License.

#include "wrapper/KnowhereResource.h"
#include "wrapper/utils.h"
#include "server/Config.h"

#include <gtest/gtest.h>

namespace {

static const char* CONFIG_FILE_PATH = "./milvus/conf/server_config.yaml";
static const char* LOG_FILE_PATH = "./milvus/conf/log_config.conf";

} // namespace

TEST(KnowhereTest, KNOWHERE_RESOURCE_TEST) {
    milvus::server::Config &config = milvus::server::Config::GetInstance();
    milvus::Status s = config.LoadConfigFile(CONFIG_FILE_PATH);
TEST_F(KnowhereTest, KNOWHERE_RESOURCE_TEST) {
    std::string config_path(CONFIG_PATH);
    config_path += CONFIG_FILE;
    milvus::server::Config& config = milvus::server::Config::GetInstance();
    milvus::Status s = config.LoadConfigFile(config_path);
    ASSERT_TRUE(s.ok());

    milvus::engine::KnowhereResource::Initialize();
@ -18,13 +18,78 @@

#include <gtest/gtest.h>
#include <faiss/IndexFlat.h>
#include <string>

#include "wrapper/utils.h"
#include "utils/CommonUtil.h"

namespace {
static const char
    * CONFIG_STR = "# All the following configurations are default values.\n"
                   "\n"
                   "server_config:\n"
                   " address: 0.0.0.0 # milvus server ip address (IPv4)\n"
                   " port: 19530 # port range: 1025 ~ 65534\n"
                   " deploy_mode: single \n"
                   " time_zone: UTC+8\n"
                   "\n"
                   "db_config:\n"
                   " primary_path: /tmp/milvus # path used to store data and meta\n"
                   " secondary_path: # path used to store data only, split by semicolon\n"
                   "\n"
                   " backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database\n"
                   " \n"
                   " # Replace 'dialect' with 'mysql' or 'sqlite'\n"
                   "\n"
                   " insert_buffer_size: 4 # GB, maximum insert buffer size allowed\n"
                   "\n"
                   "metric_config:\n"
                   " enable_monitor: false # enable monitoring or not\n"
                   " collector: prometheus # prometheus\n"
                   " prometheus_config:\n"
                   " port: 8080 # port prometheus used to fetch metrics\n"
                   "\n"
                   "cache_config:\n"
                   " cpu_mem_capacity: 16 # GB, CPU memory used for cache\n"
                   " cpu_mem_threshold: 0.85 # percentage of data kept when cache cleanup triggered\n"
                   " cache_insert_data: false # whether load inserted data into cache\n"
                   "\n"
                   "engine_config:\n"
                   " blas_threshold: 20\n"
                   "\n"
                   "resource_config:\n"
                   " resource_pool:\n"
                   " - gpu0\n"
                   " index_build_device: gpu0 # GPU used for building index";

void
DataGenBase::GenData(const int &dim, const int &nb, const int &nq,
                     float *xb, float *xq, int64_t *ids,
                     const int &k, int64_t *gt_ids, float *gt_dis) {
WriteToFile(const std::string& file_path, const char* content) {
    std::fstream fs(file_path.c_str(), std::ios_base::out);

    //write data to file
    fs << content;
    fs.close();
}

} // namespace

void
KnowhereTest::SetUp() {
    std::string config_path(CONFIG_PATH);
    milvus::server::CommonUtil::CreateDirectory(config_path);
    WriteToFile(config_path + CONFIG_FILE, CONFIG_STR);
}

void
KnowhereTest::TearDown() {
    std::string config_path(CONFIG_PATH);
    milvus::server::CommonUtil::DeleteDirectory(config_path);
}

void
DataGenBase::GenData(const int& dim, const int& nb, const int& nq,
                     float* xb, float* xq, int64_t* ids,
                     const int& k, int64_t* gt_ids, float* gt_dis) {
    for (auto i = 0; i < nb; ++i) {
        for (auto j = 0; j < dim; ++j) {
            //p_data[i * d + j] = float(base + i);
@ -44,15 +109,15 @@ DataGenBase::GenData(const int &dim, const int &nb, const int &nq,
}

void
DataGenBase::GenData(const int &dim,
                     const int &nb,
                     const int &nq,
                     std::vector<float> &xb,
                     std::vector<float> &xq,
                     std::vector<int64_t> &ids,
                     const int &k,
                     std::vector<int64_t> &gt_ids,
                     std::vector<float> &gt_dis) {
DataGenBase::GenData(const int& dim,
                     const int& nb,
                     const int& nq,
                     std::vector<float>& xb,
                     std::vector<float>& xq,
                     std::vector<int64_t>& ids,
                     const int& k,
                     std::vector<int64_t>& gt_ids,
                     std::vector<float>& gt_dis) {
    xb.resize(nb * dim);
    xq.resize(nq * dim);
    ids.resize(nb);
@ -63,27 +128,27 @@ DataGenBase::GenData(const int &dim,

void
DataGenBase::AssertResult(const std::vector<int64_t>& ids, const std::vector<float>& dis) {
        EXPECT_EQ(ids.size(), nq * k);
        EXPECT_EQ(dis.size(), nq * k);
    EXPECT_EQ(ids.size(), nq * k);
    EXPECT_EQ(dis.size(), nq * k);

        for (auto i = 0; i < nq; i++) {
            EXPECT_EQ(ids[i * k], gt_ids[i * k]);
            //EXPECT_EQ(dis[i * k], gt_dis[i * k]);
        }
    for (auto i = 0; i < nq; i++) {
        EXPECT_EQ(ids[i * k], gt_ids[i * k]);
        //EXPECT_EQ(dis[i * k], gt_dis[i * k]);
    }

        int match = 0;
        for (int i = 0; i < nq; ++i) {
            for (int j = 0; j < k; ++j) {
                for (int l = 0; l < k; ++l) {
                    if (ids[i * nq + j] == gt_ids[i * nq + l]) match++;
                }
    int match = 0;
    for (int i = 0; i < nq; ++i) {
        for (int j = 0; j < k; ++j) {
            for (int l = 0; l < k; ++l) {
                if (ids[i * nq + j] == gt_ids[i * nq + l]) match++;
            }
        }
    }

        auto precision = float(match) / (nq * k);
        EXPECT_GT(precision, 0.5);
        std::cout << std::endl << "Precision: " << precision
                  << ", match: " << match
                  << ", total: " << nq * k
                  << std::endl;
    auto precision = float(match) / (nq * k);
    EXPECT_GT(precision, 0.5);
    std::cout << std::endl << "Precision: " << precision
              << ", match: " << match
              << ", total: " << nq * k
              << std::endl;
}
@ -18,6 +18,7 @@

#pragma once

#include <gtest/gtest.h>
#include <memory>
#include <vector>
#include <cstdlib>
@ -40,6 +41,15 @@ constexpr int64_t PINMEM = 1024 * 1024 * 200;
constexpr int64_t TEMPMEM = 1024 * 1024 * 300;
constexpr int64_t RESNUM = 2;

static const char *CONFIG_PATH = "/tmp/milvus_test";
static const char *CONFIG_FILE = "/server_config.yaml";

class KnowhereTest : public ::testing::Test {
 protected:
    void SetUp() override;
    void TearDown() override;
};

class DataGenBase {
 public:
    virtual void GenData(const int& dim, const int& nb, const int& nq, float* xb, float* xq, int64_t* ids,
@ -17,9 +17,9 @@ allure-pytest==2.7.0
pytest-print==0.1.2
pytest-level==0.1.1
six==1.12.0
thrift==0.11.0
typed-ast==1.3.5
wcwidth==0.1.7
wrapt==1.11.1
zipp==0.5.1
pymilvus-test>=0.2.0
scikit-learn>=0.19.1
pymilvus-test>=0.2.0
@ -16,9 +16,6 @@ ADD_TIMEOUT = 60
nprobe = 1
epsilon = 0.0001

index_params = random.choice(gen_index_params())
logging.getLogger().info(index_params)


class TestAddBase:
    """
@ -26,6 +23,15 @@ class TestAddBase:
    The following cases are used to test `add_vectors / index / search / delete` mixed function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, args):
        if "internal" not in args:
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in open source")
        return request.param

    def test_add_vector_create_table(self, connect, table):
        '''
@ -71,7 +77,7 @@ class TestAddBase:
        method: delete table_2 and add vector to table_1
        expected: status ok
        '''
        param = {'table_name': 'test_delete_table_add_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -79,7 +85,6 @@ class TestAddBase:
        status = connect.delete_table(table)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(param['table_name'], vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -101,14 +106,13 @@ class TestAddBase:
        method: add vector and delete table
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_delete_another_table',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
        status = connect.create_table(param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        status = connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -131,7 +135,7 @@ class TestAddBase:
        method: add vector , sleep, and delete table
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_sleep_delete_another_table',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -143,86 +147,91 @@ class TestAddBase:
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_create_index_add_vector(self, connect, table):
    def test_create_index_add_vector(self, connect, table, get_simple_index_params):
        '''
        target: test add vector after build index
        method: build index and add vector
        expected: status ok
        '''
        status = connect.create_index(table, index_params)
        index_param = get_simple_index_params
        status = connect.create_index(table, index_param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_create_index_add_vector_another(self, connect, table):
    def test_create_index_add_vector_another(self, connect, table, get_simple_index_params):
        '''
        target: test add vector to table_2 after build index for table_1
        method: build index and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_create_index_add_vector_another',
        index_param = get_simple_index_params
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
        status = connect.create_table(param)
        status = connect.create_index(table, index_params)
        status = connect.create_index(table, index_param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_create_index(self, connect, table):
    def test_add_vector_create_index(self, connect, table, get_simple_index_params):
        '''
        target: test build index add after vector
        method: add vector and build index
        expected: status ok
        '''
        index_param = get_simple_index_params
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        status = connect.create_index(table, index_params)
        status = connect.create_index(table, index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_create_index_another(self, connect, table):
    def test_add_vector_create_index_another(self, connect, table, get_simple_index_params):
        '''
        target: test add vector to table_2 after build index for table_1
        method: build index and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_create_index_another',
        index_param = get_simple_index_params
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
        status = connect.create_table(param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        status = connect.create_index(param['table_name'], index_params)
        connect.delete_table(param['table_name'])
        status = connect.create_index(param['table_name'], index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_sleep_create_index(self, connect, table):
    def test_add_vector_sleep_create_index(self, connect, table, get_simple_index_params):
        '''
        target: test build index add after vector for a while
        method: add vector and build index
        expected: status ok
        '''
        index_param = get_simple_index_params
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        time.sleep(1)
        status = connect.create_index(table, index_params)
        status = connect.create_index(table, index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_sleep_create_index_another(self, connect, table):
    def test_add_vector_sleep_create_index_another(self, connect, table, get_simple_index_params):
        '''
        target: test add vector to table_2 after build index for table_1 for a while
        method: build index and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_sleep_create_index_another',
        index_param = get_simple_index_params
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -230,8 +239,7 @@ class TestAddBase:
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        time.sleep(1)
        status = connect.create_index(param['table_name'], index_params)
        connect.delete_table(param['table_name'])
        status = connect.create_index(param['table_name'], index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -253,7 +261,7 @@ class TestAddBase:
        method: search table and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_search_vector_add_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -261,7 +269,6 @@ class TestAddBase:
        vector = gen_single_vector(dim)
        status, result = connect.search_vectors(table, 1, nprobe, vector)
        status, ids = connect.add_vectors(param['table_name'], vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -283,7 +290,7 @@ class TestAddBase:
        method: search table and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_search_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -291,7 +298,6 @@ class TestAddBase:
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(table, vector)
        status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -314,7 +320,7 @@ class TestAddBase:
        method: search table , sleep, and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_sleep_search_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -323,7 +329,6 @@ class TestAddBase:
        status, ids = connect.add_vectors(table, vector)
        time.sleep(1)
        status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    """
@ -407,6 +412,7 @@ class TestAddBase:
    def get_vector_id(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_add_vectors_ids_invalid(self, connect, table, get_vector_id):
        '''
        target: test add vectors in table, use customize ids, which are not int64
@ -573,7 +579,7 @@ class TestAddBase:
        nq = 100
        vectors = gen_vectors(nq, dim)
        table_list = []
        for i in range(50):
        for i in range(20):
            table_name = gen_unique_str('test_add_vector_multi_tables')
            table_list.append(table_name)
            param = {'table_name': table_name,
@ -581,9 +587,9 @@ class TestAddBase:
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_table(param)
        time.sleep(2)
        for j in range(10):
            for i in range(50):
        time.sleep(5)
        for j in range(5):
            for i in range(20):
                status, ids = connect.add_vectors(table_name=table_list[i], records=vectors)
                assert status.OK()
@ -593,6 +599,15 @@ class TestAddIP:
    The following cases are used to test `add_vectors / index / search / delete` mixed function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, args):
        if "internal" not in args:
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in open source")
        return request.param

    def test_add_vector_create_table(self, connect, ip_table):
        '''
@ -638,7 +653,7 @@ class TestAddIP:
        method: delete table_2 and add vector to table_1
        expected: status ok
        '''
        param = {'table_name': 'test_delete_table_add_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -646,7 +661,6 @@ class TestAddIP:
        status = connect.delete_table(ip_table)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(param['table_name'], vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -698,7 +712,7 @@ class TestAddIP:
        method: add vector , sleep, and delete table
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_sleep_delete_another_table',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -710,86 +724,90 @@ class TestAddIP:
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_create_index_add_vector(self, connect, ip_table):
    def test_create_index_add_vector(self, connect, ip_table, get_simple_index_params):
        '''
        target: test add vector after build index
        method: build index and add vector
        expected: status ok
        '''
        status = connect.create_index(ip_table, index_params)
        index_param = get_simple_index_params
        status = connect.create_index(ip_table, index_param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_create_index_add_vector_another(self, connect, ip_table):
    def test_create_index_add_vector_another(self, connect, ip_table, get_simple_index_params):
        '''
        target: test add vector to table_2 after build index for table_1
        method: build index and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_create_index_add_vector_another',
        index_param = get_simple_index_params
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
        status = connect.create_table(param)
        status = connect.create_index(ip_table, index_params)
        status = connect.create_index(ip_table, index_param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_create_index(self, connect, ip_table):
    def test_add_vector_create_index(self, connect, ip_table, get_simple_index_params):
        '''
        target: test build index add after vector
        method: add vector and build index
        expected: status ok
        '''
        index_param = get_simple_index_params
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        status = connect.create_index(ip_table, index_params)
        status = connect.create_index(ip_table, index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_create_index_another(self, connect, ip_table):
    def test_add_vector_create_index_another(self, connect, ip_table, get_simple_index_params):
        '''
        target: test add vector to table_2 after build index for table_1
        method: build index and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_create_index_another',
        index_param = get_simple_index_params
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
        status = connect.create_table(param)
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        status = connect.create_index(param['table_name'], index_params)
        connect.delete_table(param['table_name'])
        status = connect.create_index(param['table_name'], index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_sleep_create_index(self, connect, ip_table):
    def test_add_vector_sleep_create_index(self, connect, ip_table, get_simple_index_params):
        '''
        target: test build index add after vector for a while
        method: add vector and build index
        expected: status ok
        '''
        index_param = get_simple_index_params
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        time.sleep(1)
        status = connect.create_index(ip_table, index_params)
        status = connect.create_index(ip_table, index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_add_vector_sleep_create_index_another(self, connect, ip_table):
    def test_add_vector_sleep_create_index_another(self, connect, ip_table, get_simple_index_params):
        '''
        target: test add vector to table_2 after build index for table_1 for a while
        method: build index and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_sleep_create_index_another',
        index_param = get_simple_index_params
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -797,8 +815,7 @@ class TestAddIP:
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        time.sleep(1)
        status = connect.create_index(param['table_name'], index_params)
        connect.delete_table(param['table_name'])
        status = connect.create_index(param['table_name'], index_param)
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -820,7 +837,7 @@ class TestAddIP:
        method: search table and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_search_vector_add_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -828,7 +845,6 @@ class TestAddIP:
        vector = gen_single_vector(dim)
        status, result = connect.search_vectors(ip_table, 1, nprobe, vector)
        status, ids = connect.add_vectors(param['table_name'], vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -850,7 +866,7 @@ class TestAddIP:
        method: search table and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_search_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -858,7 +874,6 @@ class TestAddIP:
        vector = gen_single_vector(dim)
        status, ids = connect.add_vectors(ip_table, vector)
        status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    @pytest.mark.timeout(ADD_TIMEOUT)
@ -881,7 +896,7 @@ class TestAddIP:
        method: search table , sleep, and add vector
        expected: status ok
        '''
        param = {'table_name': 'test_add_vector_sleep_search_vector_another',
        param = {'table_name': gen_unique_str(),
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.L2}
@ -890,7 +905,6 @@ class TestAddIP:
        status, ids = connect.add_vectors(ip_table, vector)
        time.sleep(1)
        status, result = connect.search_vectors(param['table_name'], 1, nprobe, vector)
        connect.delete_table(param['table_name'])
        assert status.OK()

    """
@ -974,6 +988,7 @@ class TestAddIP:
    def get_vector_id(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_add_vectors_ids_invalid(self, connect, ip_table, get_vector_id):
        '''
        target: test add vectors in table, use customize ids, which are not int64
@ -1128,7 +1143,7 @@ class TestAddIP:
        nq = 100
        vectors = gen_vectors(nq, dim)
        table_list = []
        for i in range(50):
        for i in range(20):
            table_name = gen_unique_str('test_add_vector_multi_tables')
            table_list.append(table_name)
            param = {'table_name': table_name,
@ -1138,7 +1153,7 @@ class TestAddIP:
            connect.create_table(param)
        time.sleep(2)
        for j in range(10):
            for i in range(50):
            for i in range(20):
                status, ids = connect.add_vectors(table_name=table_list[i], records=vectors)
                assert status.OK()

@ -1223,7 +1238,7 @@ class TestAddTableVectorsInvalid(object):
        with pytest.raises(Exception) as e:
            status, result = connect.add_vectors(table, tmp_single_vector)

    @pytest.mark.level(1)
    @pytest.mark.level(2)
    def test_add_vectors_with_invalid_vectors(self, connect, table, gen_vector):
        tmp_vectors = copy.deepcopy(self.vectors)
        tmp_vectors[1][1] = gen_vector
@ -8,14 +8,15 @@ import pdb
|
||||
import threading
|
||||
from multiprocessing import Pool, Process
|
||||
import numpy
|
||||
import sklearn.preprocessing
|
||||
from milvus import Milvus, IndexType, MetricType
|
||||
from utils import *
|
||||
|
||||
nb = 100000
|
||||
nb = 10000
|
||||
dim = 128
|
||||
index_file_size = 10
|
||||
vectors = gen_vectors(nb, dim)
|
||||
vectors /= numpy.linalg.norm(vectors)
|
||||
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
|
||||
vectors = vectors.tolist()
|
||||
BUILD_TIMEOUT = 60
|
||||
nprobe = 1
|
||||
@ -36,8 +37,11 @@ class TestIndexBase:
|
||||
scope="function",
|
||||
params=gen_simple_index_params()
|
||||
)
|
||||
def get_simple_index_params(self, request):
|
||||
yield request.param
|
||||
def get_simple_index_params(self, request, args):
|
||||
if "internal" not in args:
|
||||
if request.param["index_type"] == IndexType.IVF_SQ8H:
|
||||
pytest.skip("sq8h not support in open source")
|
||||
return request.param
|
||||
|
||||
"""
|
||||
******************************************************************
|
||||
@ -65,8 +69,10 @@ class TestIndexBase:
|
||||
method: create table and add vectors in it, check if added successfully
|
||||
expected: raise exception
|
||||
'''
|
||||
nlist = 16384
|
||||
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
|
||||
with pytest.raises(Exception) as e:
|
||||
status = dis_connect.create_index(table, random.choice(gen_index_params()))
|
||||
status = dis_connect.create_index(table, index_param)
|
||||
|
||||
@pytest.mark.timeout(BUILD_TIMEOUT)
|
||||
def test_create_index_search_with_query_vectors(self, connect, table, get_index_params):
|
||||
@ -179,12 +185,14 @@ class TestIndexBase:
|
||||
def test_create_index_table_not_existed(self, connect):
|
||||
'''
|
||||
target: test create index interface when table name not existed
|
||||
method: create table and add vectors in it, create index with an random table_name
|
||||
method: create table and add vectors in it, create index
|
||||
, make sure the table name not in index
|
||||
expected: return code not equals to 0, create index failed
|
||||
'''
|
||||
table_name = gen_unique_str(self.__class__.__name__)
|
||||
status = connect.create_index(table_name, random.choice(gen_index_params()))
|
||||
nlist = 16384
|
||||
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
|
||||
status = connect.create_index(table_name, index_param)
|
||||
assert not status.OK()
|
||||
|
||||
def test_create_index_table_None(self, connect):
|
||||
@ -194,8 +202,10 @@ class TestIndexBase:
|
||||
expected: return code not equals to 0, create index failed
|
||||
'''
|
||||
table_name = None
|
||||
nlist = 16384
|
||||
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
|
||||
with pytest.raises(Exception) as e:
|
||||
status = connect.create_index(table_name, random.choice(gen_index_params()))
|
||||
status = connect.create_index(table_name, index_param)
|
||||
|
||||
def test_create_index_no_vectors(self, connect, table):
|
||||
'''
|
||||
@ -203,32 +213,34 @@ class TestIndexBase:
|
||||
method: create table and add no vectors in it, and then create index
|
||||
expected: return code equals to 0
|
||||
'''
|
||||
status = connect.create_index(table, random.choice(gen_index_params()))
|
||||
nlist = 16384
|
||||
index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
|
||||
status = connect.create_index(table, index_param)
|
||||
assert status.OK()
|
||||
|
||||
@pytest.mark.timeout(BUILD_TIMEOUT)
|
||||
def test_create_index_no_vectors_then_add_vectors(self, connect, table):
|
||||
def test_create_index_no_vectors_then_add_vectors(self, connect, table, get_simple_index_params):
|
||||
'''
|
||||
target: test create index interface when there is no vectors in table, and does not affect the subsequent process
|
||||
method: create table and add no vectors in it, and then create index, add vectors in it
|
||||
expected: return code equals to 0
|
||||
'''
|
||||
status = connect.create_index(table, random.choice(gen_index_params()))
|
||||
index_param = get_simple_index_params
|
||||
status = connect.create_index(table, index_param)
|
||||
status, ids = connect.add_vectors(table, vectors)
|
||||
assert status.OK()
|
||||
|
||||
@pytest.mark.timeout(BUILD_TIMEOUT)
|
||||
def test_create_same_index_repeatedly(self, connect, table):
|
||||
def test_create_same_index_repeatedly(self, connect, table, get_simple_index_params):
|
||||
'''
|
||||
target: check if index can be created repeatedly, with the same create_index params
|
||||
method: create index after index have been built
|
||||
expected: return code success, and search ok
|
||||
'''
|
||||
status, ids = connect.add_vectors(table, vectors)
|
||||
index_params = random.choice(gen_index_params())
|
||||
# index_params = get_index_params
|
||||
status = connect.create_index(table, index_params)
|
||||
status = connect.create_index(table, index_params)
|
||||
index_param = get_simple_index_params
|
||||
status = connect.create_index(table, index_param)
|
||||
status = connect.create_index(table, index_param)
|
||||
assert status.OK()
|
||||
query_vec = [vectors[0]]
|
||||
top_k = 1
|
||||
@@ -243,16 +255,19 @@ class TestIndexBase:
         method: create another index with different index_params after index have been built
         expected: return code 0, and describe index result equals with the second index params
         '''
+        nlist = 16384
         status, ids = connect.add_vectors(table, vectors)
-        index_params = random.sample(gen_index_params(), 2)
+        index_type_1 = IndexType.IVF_SQ8
+        index_type_2 = IndexType.IVFLAT
+        index_params = [{"index_type": index_type_1, "nlist": nlist}, {"index_type": index_type_2, "nlist": nlist}]
         logging.getLogger().info(index_params)
-        status = connect.create_index(table, index_params[0])
-        status = connect.create_index(table, index_params[1])
-        assert status.OK()
+        for index_param in index_params:
+            status = connect.create_index(table, index_param)
+            assert status.OK()
         status, result = connect.describe_index(table)
-        assert result._nlist == index_params[1]["nlist"]
+        assert result._nlist == nlist
         assert result._table_name == table
-        assert result._index_type == index_params[1]["index_type"]
+        assert result._index_type == index_type_2

     """
     ******************************************************************
@@ -328,7 +343,7 @@ class TestIndexBase:
     def test_describe_index_table_not_existed(self, connect):
         '''
         target: test describe index interface when table name not existed
-        method: create table and add vectors in it, create index with an random table_name
+        method: create table and add vectors in it, create index
+        , make sure the table name not in index
         expected: return code not equals to 0, describe index failed
         '''
@@ -349,7 +364,7 @@ class TestIndexBase:
     def test_describe_index_not_create(self, connect, table):
         '''
         target: test describe index interface when index not created
-        method: create table and add vectors in it, create index with an random table_name
+        method: create table and add vectors in it, create index
+        , make sure the table name not in index
         expected: return code not equals to 0, describe index failed
         '''
@@ -373,9 +388,9 @@ class TestIndexBase:
         method: create table and add vectors in it, create index, call drop index
         expected: return code 0, and default index param
         '''
-        index_params = get_index_params
+        index_param = get_index_params
         status, ids = connect.add_vectors(table, vectors)
-        status = connect.create_index(table, index_params)
+        status = connect.create_index(table, index_param)
         assert status.OK()
         status, result = connect.describe_index(table)
         logging.getLogger().info(result)
@@ -387,15 +402,15 @@ class TestIndexBase:
         assert result._table_name == table
         assert result._index_type == IndexType.FLAT

-    def test_drop_index_repeatly(self, connect, table, get_simple_index_params):
+    def test_drop_index_repeatly(self, connect, table, get_index_params):
         '''
         target: test drop index repeatly
         method: create index, call drop index, and drop again
         expected: return code 0
         '''
-        index_params = get_simple_index_params
+        index_param = get_index_params
         status, ids = connect.add_vectors(table, vectors)
-        status = connect.create_index(table, index_params)
+        status = connect.create_index(table, index_param)
         assert status.OK()
         status, result = connect.describe_index(table)
         logging.getLogger().info(result)
@@ -422,7 +437,7 @@ class TestIndexBase:
     def test_drop_index_table_not_existed(self, connect):
         '''
         target: test drop index interface when table name not existed
-        method: create table and add vectors in it, create index with an random table_name
+        method: create table and add vectors in it, create index
+        , make sure the table name not in index, and then drop it
        expected: return code not equals to 0, drop index failed
         '''
@@ -446,8 +461,8 @@ class TestIndexBase:
         method: create table and add vectors in it, create index
         expected: return code not equals to 0, drop index failed
         '''
-        index_params = random.choice(gen_index_params())
-        logging.getLogger().info(index_params)
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
         status, ids = connect.add_vectors(table, vectors)
         status, result = connect.describe_index(table)
         logging.getLogger().info(result)
@@ -483,7 +498,8 @@ class TestIndexBase:
         method: create index, drop index, four times, each tme use different index_params to create index
         expected: return code 0
         '''
-        index_params = random.sample(gen_index_params(), 2)
+        nlist = 16384
+        index_params = [{"index_type": IndexType.IVFLAT, "nlist": nlist}, {"index_type": IndexType.IVF_SQ8, "nlist": nlist}]
         status, ids = connect.add_vectors(table, vectors)
         for i in range(2):
             status = connect.create_index(table, index_params[i])
@@ -514,15 +530,18 @@ class TestIndexIP:
         scope="function",
         params=gen_simple_index_params()
     )
-    def get_simple_index_params(self, request):
-        yield request.param
+    def get_simple_index_params(self, request, args):
+        if "internal" not in args:
+            if request.param["index_type"] == IndexType.IVF_SQ8H:
+                pytest.skip("sq8h not support in open source")
+        return request.param

     """
     ******************************************************************
     The following cases are used to test `create_index` function
     ******************************************************************
     """

     @pytest.mark.level(2)
     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_index(self, connect, ip_table, get_index_params):
         '''
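
The reworked fixture parametrizes every `TestIndexIP` case over `gen_simple_index_params()` and skips `IVF_SQ8H` unless the suite was launched against an internal (GPU) build. The same pattern in a self-contained sketch; the stub `args` fixture standing in for the suite's CLI-option fixture is an assumption:

    import pytest

    @pytest.fixture(scope="session")
    def args():
        # Stub for the suite's real `args` fixture, which carries CLI options.
        return {}

    @pytest.fixture(scope="function", params=[{"index_type": "IVF_SQ8"}, {"index_type": "IVF_SQ8H"}])
    def get_simple_index_params(request, args):
        # Skip the GPU-only index type on open-source builds.
        if "internal" not in args and request.param["index_type"] == "IVF_SQ8H":
            pytest.skip("sq8h not support in open source")
        return request.param

    def test_uses_every_index_type(get_simple_index_params):
        assert "index_type" in get_simple_index_params
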
@@ -543,8 +562,10 @@ class TestIndexIP:
         method: create table and add vectors in it, check if added successfully
         expected: raise exception
         '''
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
         with pytest.raises(Exception) as e:
-            status = dis_connect.create_index(ip_table, random.choice(gen_index_params()))
+            status = dis_connect.create_index(ip_table, index_param)

     @pytest.mark.timeout(BUILD_TIMEOUT)
     def test_create_index_search_with_query_vectors(self, connect, ip_table, get_index_params):
@@ -557,6 +578,7 @@ class TestIndexIP:
         logging.getLogger().info(index_params)
         status, ids = connect.add_vectors(ip_table, vectors)
         status = connect.create_index(ip_table, index_params)
         assert status.OK()
+        logging.getLogger().info(connect.describe_index(ip_table))
         query_vecs = [vectors[0], vectors[1], vectors[2]]
         top_k = 5
@@ -658,17 +680,20 @@ class TestIndexIP:
         method: create table and add no vectors in it, and then create index
         expected: return code equals to 0
         '''
-        status = connect.create_index(ip_table, random.choice(gen_index_params()))
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
+        status = connect.create_index(ip_table, index_param)
         assert status.OK()

     @pytest.mark.timeout(BUILD_TIMEOUT)
-    def test_create_index_no_vectors_then_add_vectors(self, connect, ip_table):
+    def test_create_index_no_vectors_then_add_vectors(self, connect, ip_table, get_simple_index_params):
         '''
         target: test create index interface when there is no vectors in table, and does not affect the subsequent process
         method: create table and add no vectors in it, and then create index, add vectors in it
         expected: return code equals to 0
         '''
-        status = connect.create_index(ip_table, random.choice(gen_index_params()))
+        index_param = get_simple_index_params
+        status = connect.create_index(ip_table, index_param)
         status, ids = connect.add_vectors(ip_table, vectors)
         assert status.OK()

@@ -679,11 +704,11 @@ class TestIndexIP:
         method: create index after index have been built
         expected: return code success, and search ok
         '''
+        nlist = 16384
         status, ids = connect.add_vectors(ip_table, vectors)
-        index_params = random.choice(gen_index_params())
-        # index_params = get_index_params
-        status = connect.create_index(ip_table, index_params)
-        status = connect.create_index(ip_table, index_params)
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
+        status = connect.create_index(ip_table, index_param)
+        status = connect.create_index(ip_table, index_param)
         assert status.OK()
         query_vec = [vectors[0]]
         top_k = 1
@@ -698,16 +723,19 @@ class TestIndexIP:
         method: create another index with different index_params after index have been built
         expected: return code 0, and describe index result equals with the second index params
         '''
+        nlist = 16384
         status, ids = connect.add_vectors(ip_table, vectors)
-        index_params = random.sample(gen_index_params(), 2)
+        index_type_1 = IndexType.IVF_SQ8
+        index_type_2 = IndexType.IVFLAT
+        index_params = [{"index_type": index_type_1, "nlist": nlist}, {"index_type": index_type_2, "nlist": nlist}]
         logging.getLogger().info(index_params)
-        status = connect.create_index(ip_table, index_params[0])
-        status = connect.create_index(ip_table, index_params[1])
-        assert status.OK()
+        for index_param in index_params:
+            status = connect.create_index(ip_table, index_param)
+            assert status.OK()
         status, result = connect.describe_index(ip_table)
-        assert result._nlist == index_params[1]["nlist"]
+        assert result._nlist == nlist
         assert result._table_name == ip_table
-        assert result._index_type == index_params[1]["index_type"]
+        assert result._index_type == index_type_2

     """
     ******************************************************************
@@ -783,7 +811,7 @@ class TestIndexIP:
     def test_describe_index_not_create(self, connect, ip_table):
         '''
         target: test describe index interface when index not created
-        method: create table and add vectors in it, create index with an random table_name
+        method: create table and add vectors in it, create index
+        , make sure the table name not in index
         expected: return code not equals to 0, describe index failed
         '''
@@ -850,8 +878,10 @@ class TestIndexIP:
         method: drop index, and check if drop successfully
         expected: raise exception
         '''
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVFLAT, "nlist": nlist}
         with pytest.raises(Exception) as e:
-            status = dis_connect.drop_index(ip_table, random.choice(gen_index_params()))
+            status = dis_connect.drop_index(ip_table, index_param)

     def test_drop_index_table_not_create(self, connect, ip_table):
         '''
@@ -859,8 +889,9 @@ class TestIndexIP:
         method: create table and add vectors in it, create index
         expected: return code not equals to 0, drop index failed
         '''
-        index_params = random.choice(gen_index_params())
-        logging.getLogger().info(index_params)
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
+        logging.getLogger().info(index_param)
         status, ids = connect.add_vectors(ip_table, vectors)
         status, result = connect.describe_index(ip_table)
         logging.getLogger().info(result)
@@ -896,7 +927,8 @@ class TestIndexIP:
         method: create index, drop index, four times, each tme use different index_params to create index
         expected: return code 0
         '''
-        index_params = random.sample(gen_index_params(), 2)
+        nlist = 16384
+        index_params = [{"index_type": IndexType.IVFLAT, "nlist": nlist}, {"index_type": IndexType.IVF_SQ8, "nlist": nlist}]
         status, ids = connect.add_vectors(ip_table, vectors)
         for i in range(2):
             status = connect.create_index(ip_table, index_params[i])
@@ -927,19 +959,21 @@ class TestIndexTableInvalid(object):
     def get_table_name(self, request):
         yield request.param

-    # @pytest.mark.level(1)
+    @pytest.mark.level(2)
     def test_create_index_with_invalid_tablename(self, connect, get_table_name):
         table_name = get_table_name
-        status = connect.create_index(table_name, random.choice(gen_index_params()))
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
+        status = connect.create_index(table_name, index_param)
         assert not status.OK()

-    # @pytest.mark.level(1)
+    @pytest.mark.level(2)
     def test_describe_index_with_invalid_tablename(self, connect, get_table_name):
         table_name = get_table_name
         status, result = connect.describe_index(table_name)
         assert not status.OK()

-    # @pytest.mark.level(1)
+    @pytest.mark.level(2)
     def test_drop_index_with_invalid_tablename(self, connect, get_table_name):
         table_name = get_table_name
         status = connect.drop_index(table_name)

@@ -6,7 +6,7 @@ import datetime
 import logging
 from time import sleep
 from multiprocessing import Process
-import numpy
+import sklearn.preprocessing
 from milvus import Milvus, IndexType, MetricType
 from utils import *

@@ -15,7 +15,7 @@ index_file_size = 10
 table_id = "test_mix"
 add_interval_time = 2
 vectors = gen_vectors(100000, dim)
-vectors /= numpy.linalg.norm(vectors)
+vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
 vectors = vectors.tolist()
 top_k = 1
 nprobe = 1
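
The normalization fix above matters for inner-product (IP) search: `vectors /= numpy.linalg.norm(vectors)` divides every element by one global matrix norm, while `sklearn.preprocessing.normalize(..., axis=1, norm='l2')` scales each row to unit length. A quick check of the difference:

    import numpy as np
    import sklearn.preprocessing

    vectors = np.random.random((5, 128)).astype(np.float32)

    wrong = vectors / np.linalg.norm(vectors)  # one scalar norm for the whole matrix
    right = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')  # per-row norms

    print(np.linalg.norm(wrong, axis=1))  # rows are generally not unit length
    print(np.linalg.norm(right, axis=1))  # every row is 1.0
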
@@ -26,9 +26,9 @@ index_params = {'index_type': IndexType.IVFLAT, 'nlist': 16384}
 class TestMixBase:

     # TODO: enable
-    def _test_search_during_createIndex(self, args):
+    def test_search_during_createIndex(self, args):
         loops = 100000
-        table = "test_search_during_createIndex"
+        table = gen_unique_str()
         query_vecs = [vectors[0], vectors[1]]
         uri = "tcp://%s:%s" % (args["ip"], args["port"])
         id_0 = 0; id_1 = 0
@@ -54,6 +54,7 @@ class TestMixBase:
             status, ids = milvus_instance.add_vectors(table, vectors)
             logging.getLogger().info(status)
+        def search(milvus_instance):
             logging.getLogger().info("In search vectors")
             for i in range(loops):
                 status, result = milvus_instance.search_vectors(table, top_k, nprobe, query_vecs)
             logging.getLogger().info(status)
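
The newly enabled test runs `create_index` and `search_vectors` in separate processes against the same server. Its concurrency skeleton, reduced to the essentials (the worker bodies and the URI are placeholders; the real test opens one client per process):

    from multiprocessing import Process

    def create_index(uri):
        # Placeholder: connect a dedicated client via `uri` and build the index.
        pass

    def search(uri):
        # Placeholder: connect a dedicated client via `uri` and search in a loop.
        pass

    if __name__ == "__main__":
        uri = "tcp://127.0.0.1:19530"  # assumed local server
        p_search = Process(target=search, args=(uri,))
        p_create = Process(target=create_index, args=(uri,))
        p_search.start()
        p_create.start()
        p_create.join()
        p_search.join()
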
@@ -69,6 +70,7 @@ class TestMixBase:
         p_create.start()
         p_create.join()

+    @pytest.mark.level(2)
     def test_mix_multi_tables(self, connect):
         '''
         target: test functions with multiple tables of different metric_types and index_types
@@ -77,6 +79,7 @@ class TestMixBase:
         expected: status ok
         '''
         nq = 10000
+        nlist= 16384
         vectors = gen_vectors(nq, dim)
         table_list = []
         idx = []
@@ -112,17 +115,17 @@ class TestMixBase:

         #create index
         for i in range(10):
-            index_params = {'index_type': IndexType.FLAT, 'nlist': 16384}
+            index_params = {'index_type': IndexType.FLAT, 'nlist': nlist}
             status = connect.create_index(table_list[i], index_params)
             assert status.OK()
             status = connect.create_index(table_list[30 + i], index_params)
             assert status.OK()
-            index_params = {'index_type': IndexType.IVFLAT, 'nlist': 16384}
+            index_params = {'index_type': IndexType.IVFLAT, 'nlist': nlist}
             status = connect.create_index(table_list[10 + i], index_params)
             assert status.OK()
             status = connect.create_index(table_list[40 + i], index_params)
             assert status.OK()
-            index_params = {'index_type': IndexType.IVF_SQ8, 'nlist': 16384}
+            index_params = {'index_type': IndexType.IVF_SQ8, 'nlist': nlist}
             status = connect.create_index(table_list[20 + i], index_params)
             assert status.OK()
             status = connect.create_index(table_list[50 + i], index_params)

@@ -54,7 +54,7 @@ class TestSearchBase:
     """
     @pytest.fixture(
         scope="function",
-        params=[1, 99, 101, 1024, 2048, 2049]
+        params=[1, 99, 1024, 2048, 2049]
     )
     def get_top_k(self, request):
         yield request.param
@@ -220,7 +220,6 @@ class TestSearchBase:
         scope="function",
         params=[
             (get_last_day(2), get_last_day(1)),
-            (get_last_day(2), get_current_day()),
             (get_next_day(1), get_next_day(2))
         ]
     )
@@ -482,8 +481,9 @@ class TestSearchBase:
     """

 class TestSearchParamsInvalid(object):
-    index_params = random.choice(gen_index_params())
-    logging.getLogger().info(index_params)
+    nlist = 16384
+    index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
+    logging.getLogger().info(index_param)

     def init_data(self, connect, table, nb=100):
         '''
@@ -528,7 +528,7 @@ class TestSearchParamsInvalid(object):
     def get_top_k(self, request):
         yield request.param

-    @pytest.mark.level(2)
+    @pytest.mark.level(1)
     def test_search_with_invalid_top_k(self, connect, table, get_top_k):
         '''
         target: test search fuction, with the wrong top_k
@@ -539,9 +539,12 @@ class TestSearchParamsInvalid(object):
         logging.getLogger().info(top_k)
         nprobe = 1
         query_vecs = gen_vectors(1, dim)
-        with pytest.raises(Exception) as e:
+        if isinstance(top_k, int):
             status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
-            res = connect.server_version()
+            assert not status.OK()
+        else:
+            with pytest.raises(Exception) as e:
+                status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)

     @pytest.mark.level(2)
     def test_search_with_invalid_top_k_ip(self, connect, ip_table, get_top_k):
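
The rewritten invalid-top_k checks distinguish two failure modes: an integer that is merely out of range comes back as a non-OK status from the server, while a non-integer top_k raises in the client before any request is sent. The branching, extracted as a helper (a sketch; `connect` follows the suite's pymilvus 0.x usage):

    import pytest

    def check_invalid_top_k(connect, table, top_k, nprobe, query_vecs):
        if isinstance(top_k, int):
            # Out-of-range integer: the server rejects it with a bad status.
            status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
            assert not status.OK()
        else:
            # Wrong type: the client raises before the request goes out.
            with pytest.raises(Exception):
                connect.search_vectors(table, top_k, nprobe, query_vecs)
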
@@ -554,10 +557,12 @@ class TestSearchParamsInvalid(object):
         logging.getLogger().info(top_k)
         nprobe = 1
         query_vecs = gen_vectors(1, dim)
-        with pytest.raises(Exception) as e:
+        if isinstance(top_k, int):
             status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
-            res = connect.server_version()
-
+            assert not status.OK()
+        else:
+            with pytest.raises(Exception) as e:
+                status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
     """
     Test search table with invalid nprobe
     """
@@ -568,7 +573,7 @@ class TestSearchParamsInvalid(object):
     def get_nprobes(self, request):
         yield request.param

-    @pytest.mark.level(2)
+    @pytest.mark.level(1)
     def test_search_with_invalid_nrpobe(self, connect, table, get_nprobes):
         '''
         target: test search fuction, with the wrong top_k
@@ -579,7 +584,7 @@ class TestSearchParamsInvalid(object):
         nprobe = get_nprobes
         logging.getLogger().info(nprobe)
         query_vecs = gen_vectors(1, dim)
-        if isinstance(nprobe, int) and nprobe > 0:
+        if isinstance(nprobe, int):
             status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
             assert not status.OK()
         else:
@@ -597,7 +602,7 @@ class TestSearchParamsInvalid(object):
         nprobe = get_nprobes
         logging.getLogger().info(nprobe)
         query_vecs = gen_vectors(1, dim)
-        if isinstance(nprobe, int) and nprobe > 0:
+        if isinstance(nprobe, int):
             status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
             assert not status.OK()
         else:
@@ -614,7 +619,7 @@ class TestSearchParamsInvalid(object):
     def get_query_ranges(self, request):
         yield request.param

-    @pytest.mark.level(2)
+    @pytest.mark.level(1)
     def test_search_flat_with_invalid_query_range(self, connect, table, get_query_ranges):
         '''
         target: test search fuction, with the wrong query_range

@@ -178,6 +178,7 @@ class TestTable:
         assert res.table_name == table_name
         assert res.metric_type == MetricType.L2

+    @pytest.mark.level(2)
     def test_table_describe_table_name_ip(self, connect):
         '''
         target: test describe table created with correct params
@@ -266,6 +267,7 @@ class TestTable:
         status = connect.delete_table(table)
         assert not assert_has_table(connect, table)

+    @pytest.mark.level(2)
     def test_delete_table_ip(self, connect, ip_table):
         '''
         target: test delete table created with correct params
@@ -335,7 +337,6 @@ class TestTable:
         time.sleep(2)
         assert status.OK()

-    @pytest.mark.level(2)
     def test_delete_create_table_repeatedly_ip(self, connect):
         '''
         target: test delete and create the same table repeatedly
@@ -587,25 +588,25 @@ class TestTable:
     """
     @pytest.fixture(
         scope="function",
-        params=gen_index_params()
+        params=gen_simple_index_params()
     )
-    def get_index_params(self, request, args):
+    def get_simple_index_params(self, request, args):
         if "internal" not in args:
             if request.param["index_type"] == IndexType.IVF_SQ8H:
                 pytest.skip("sq8h not support in open source")
         return request.param

     @pytest.mark.level(1)
-    def test_preload_table(self, connect, table, get_index_params):
-        index_params = get_index_params
+    def test_preload_table(self, connect, table, get_simple_index_params):
+        index_params = get_simple_index_params
         status, ids = connect.add_vectors(table, vectors)
         status = connect.create_index(table, index_params)
         status = connect.preload_table(table)
         assert status.OK()

     @pytest.mark.level(1)
-    def test_preload_table_ip(self, connect, ip_table, get_index_params):
-        index_params = get_index_params
+    def test_preload_table_ip(self, connect, ip_table, get_simple_index_params):
+        index_params = get_simple_index_params
         status, ids = connect.add_vectors(ip_table, vectors)
         status = connect.create_index(ip_table, index_params)
         status = connect.preload_table(ip_table)
@@ -613,19 +614,21 @@ class TestTable:

     @pytest.mark.level(1)
     def test_preload_table_not_existed(self, connect, table):
-        table_name = gen_unique_str("test_preload_table_not_existed")
-        index_params = random.choice(gen_index_params())
+        table_name = gen_unique_str()
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
         status, ids = connect.add_vectors(table, vectors)
-        status = connect.create_index(table, index_params)
+        status = connect.create_index(table, index_param)
         status = connect.preload_table(table_name)
         assert not status.OK()

-    @pytest.mark.level(1)
+    @pytest.mark.level(2)
     def test_preload_table_not_existed_ip(self, connect, ip_table):
-        table_name = gen_unique_str("test_preload_table_not_existed")
-        index_params = random.choice(gen_index_params())
+        table_name = gen_unique_str()
+        nlist = 16384
+        index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
         status, ids = connect.add_vectors(ip_table, vectors)
-        status = connect.create_index(ip_table, index_params)
+        status = connect.create_index(ip_table, index_param)
         status = connect.preload_table(table_name)
         assert not status.OK()

@@ -634,7 +637,7 @@ class TestTable:
         status = connect.preload_table(table)
         assert status.OK()

-    @pytest.mark.level(1)
+    @pytest.mark.level(2)
     def test_preload_table_no_vectors_ip(self, connect, ip_table):
         status = connect.preload_table(ip_table)
         assert status.OK()
@@ -656,6 +659,7 @@ class TestTableInvalid(object):
     def get_table_name(self, request):
         yield request.param

+    @pytest.mark.level(2)
     def test_create_table_with_invalid_tablename(self, connect, get_table_name):
         table_name = get_table_name
         param = {'table_name': table_name,
@@ -691,6 +695,7 @@ class TestCreateTableDimInvalid(object):
     def get_dim(self, request):
         yield request.param

+    @pytest.mark.level(2)
     @pytest.mark.timeout(5)
     def test_create_table_with_invalid_dimension(self, connect, get_dim):
         dimension = get_dim
@@ -726,7 +731,7 @@ class TestCreateTableIndexSizeInvalid(object):
                  'dimension': dim,
                  'index_file_size': file_size,
                  'metric_type': MetricType.L2}
-        if isinstance(file_size, int) and file_size > 0:
+        if isinstance(file_size, int):
             status = connect.create_table(param)
             assert not status.OK()
         else:
@@ -777,7 +782,7 @@ def preload_table(connect, **params):
     return status

 def has(connect, **params):
-    status = assert_has_table(connect, params["table_name"])
+    status, result = connect.has_table(params["table_name"])
     return status

 def show(connect, **params):
@@ -801,7 +806,7 @@ def create_index(connect, **params):
     return status

 func_map = {
-    # 0:has,
+    0:has,
     1:show,
     10:create_table,
     11:describe,
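
With `0:has` uncommented, `func_map` is a complete integer-keyed dispatch table: a randomized test can draw a key and invoke the mapped operation with keyword params. The mechanism in a self-contained sketch (the two stub operations stand in for the real wrappers):

    def has(connect, **params):
        status, _ = connect.has_table(params["table_name"])
        return status

    def show(connect, **params):
        status, _ = connect.show_tables()
        return status

    func_map = {
        0: has,
        1: show,
    }

    def run_op(connect, code, **params):
        # Look up the operation by its integer code and call it.
        return func_map[code](connect, **params)
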
@@ -23,7 +23,7 @@ class TestTableCount:
     @pytest.fixture(
         scope="function",
         params=[
             100,
+            1,
             5000,
-            100000,
         ],
@@ -36,9 +36,9 @@ class TestTableCount:
     """
     @pytest.fixture(
         scope="function",
-        params=gen_index_params()
+        params=gen_simple_index_params()
     )
-    def get_index_params(self, request, args):
+    def get_simple_index_params(self, request, args):
         if "internal" not in args:
             if request.param["index_type"] == IndexType.IVF_SQ8H:
                 pytest.skip("sq8h not support in open source")
@@ -58,14 +58,14 @@ class TestTableCount:
         status, res = connect.get_table_row_count(table)
         assert res == nb

-    def test_table_rows_count_after_index_created(self, connect, table, get_index_params):
+    def test_table_rows_count_after_index_created(self, connect, table, get_simple_index_params):
         '''
         target: test get_table_row_count, after index have been created
         method: add vectors in db, and create index, then calling get_table_row_count with correct params
         expected: get_table_row_count raise exception
         '''
         nb = 100
-        index_params = get_index_params
+        index_params = get_simple_index_params
         vectors = gen_vectors(nb, dim)
         res = connect.add_vectors(table_name=table, records=vectors)
         time.sleep(add_time_interval)
@@ -91,7 +91,7 @@ class TestTableCount:
         assert the value returned by get_table_row_count method is equal to 0
         expected: the count is equal to 0
         '''
-        table_name = gen_unique_str("test_table")
+        table_name = gen_unique_str()
         param = {'table_name': table_name,
                  'dimension': dim,
                  'index_file_size': index_file_size}
@@ -142,8 +142,8 @@ class TestTableCount:
         nq = 100
         vectors = gen_vectors(nq, dim)
         table_list = []
-        for i in range(50):
-            table_name = gen_unique_str('test_table_rows_count_multi_tables')
+        for i in range(20):
+            table_name = gen_unique_str()
             table_list.append(table_name)
             param = {'table_name': table_name,
                      'dimension': dim,
@@ -152,7 +152,7 @@ class TestTableCount:
             connect.create_table(param)
             res = connect.add_vectors(table_name=table_name, records=vectors)
         time.sleep(2)
-        for i in range(50):
+        for i in range(20):
             status, res = connect.get_table_row_count(table_list[i])
             assert status.OK()
             assert res == nq
@@ -166,7 +166,7 @@ class TestTableCountIP:
     @pytest.fixture(
         scope="function",
         params=[
             100,
+            1,
             5000,
-            100000,
         ],
@@ -180,9 +180,9 @@ class TestTableCountIP:

     @pytest.fixture(
         scope="function",
-        params=gen_index_params()
+        params=gen_simple_index_params()
     )
-    def get_index_params(self, request, args):
+    def get_simple_index_params(self, request, args):
         if "internal" not in args:
             if request.param["index_type"] == IndexType.IVF_SQ8H:
                 pytest.skip("sq8h not support in open source")
@@ -202,14 +202,14 @@ class TestTableCountIP:
         status, res = connect.get_table_row_count(ip_table)
         assert res == nb

-    def test_table_rows_count_after_index_created(self, connect, ip_table, get_index_params):
+    def test_table_rows_count_after_index_created(self, connect, ip_table, get_simple_index_params):
         '''
         target: test get_table_row_count, after index have been created
         method: add vectors in db, and create index, then calling get_table_row_count with correct params
         expected: get_table_row_count raise exception
         '''
         nb = 100
-        index_params = get_index_params
+        index_params = get_simple_index_params
         vectors = gen_vectors(nb, dim)
         res = connect.add_vectors(table_name=ip_table, records=vectors)
         time.sleep(add_time_interval)
@@ -243,10 +243,8 @@ class TestTableCountIP:
         status, res = connect.get_table_row_count(ip_table)
         assert res == 0

-    # TODO: enable
-    @pytest.mark.level(2)
-    @pytest.mark.timeout(20)
-    def _test_table_rows_count_multiprocessing(self, connect, ip_table, args):
+    @pytest.mark.timeout(60)
+    def test_table_rows_count_multiprocessing(self, connect, ip_table, args):
         '''
         target: test table rows_count is correct or not with multiprocess
         method: create table and add vectors in it,
@@ -286,7 +284,7 @@ class TestTableCountIP:
         nq = 100
         vectors = gen_vectors(nq, dim)
         table_list = []
-        for i in range(50):
+        for i in range(20):
             table_name = gen_unique_str('test_table_rows_count_multi_tables')
             table_list.append(table_name)
             param = {'table_name': table_name,
@@ -296,7 +294,7 @@ class TestTableCountIP:
             connect.create_table(param)
             res = connect.add_vectors(table_name=table_name, records=vectors)
         time.sleep(2)
-        for i in range(50):
+        for i in range(20):
             status, res = connect.get_table_row_count(table_list[i])
             assert status.OK()
             assert res == nq

@@ -26,9 +26,9 @@ def gen_vector(nb, d, seed=np.random.RandomState(1234)):
     return xb.tolist()


-def gen_unique_str(str=None):
+def gen_unique_str(str_value=None):
     prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
-    return prefix if str is None else str + "_" + prefix
+    return "test_"+prefix if str_value is None else str_value+"_"+prefix


 def get_current_day():
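
The new `gen_unique_str` stops shadowing the builtin `str` and guarantees a `test_` prefix when no base name is passed, so auto-generated table names are easy to spot and clean up. Its behavior, runnable as-is:

    import random
    import string

    def gen_unique_str(str_value=None):
        prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
        return "test_" + prefix if str_value is None else str_value + "_" + prefix

    print(gen_unique_str())            # e.g. test_a1B2c3D4
    print(gen_unique_str("my_table"))  # e.g. my_table_a1B2c3D4
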
@@ -449,10 +449,11 @@ def gen_index_params():

     return gen_params(index_types, nlists)


 def gen_simple_index_params():
     index_params = []
     index_types = [IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H]
-    nlists = [16384]
+    nlists = [1024]

     def gen_params(index_types, nlists):
         return [ {"index_type": index_type, "nlist": nlist} \
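
Dropping `nlists` from `[16384]` to `[1024]` shrinks every "simple" index build in CI while still covering each index type once. The cross-product helper that produces the parameter dicts, sketched with string stand-ins for the `milvus.IndexType` members:

    # Stand-ins for milvus.IndexType members, to keep the sketch self-contained.
    INDEX_TYPES = ["FLAT", "IVFLAT", "IVF_SQ8", "IVF_SQ8H"]
    NLISTS = [1024]

    def gen_params(index_types, nlists):
        # One dict per (index_type, nlist) pair.
        return [{"index_type": index_type, "nlist": nlist}
                for index_type in index_types
                for nlist in nlists]

    print(gen_params(INDEX_TYPES, NLISTS))
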
@@ -475,7 +476,7 @@ if __name__ == "__main__":
     table = "test"


-    file_name = '/poc/yuncong/ann_1000m/query.npy'
+    file_name = 'query.npy'
     data = np.load(file_name)
     vectors = data[0:nq].tolist()
     # print(vectors)