MS-143 merge

Former-commit-id: b8eeda3dcb955a065f4adc933d53c16a10b6f04a
xj.lin 2019-07-02 19:28:50 +08:00
commit 619a800766
106 changed files with 7220 additions and 2660 deletions

cpp/.gitignore vendored

@ -1,4 +1,10 @@
milvus/
conf/server_config.yaml
conf/log_config.conf
version.h
megasearch/
lcov_out/
base.info
output.info
output_new.info
server.info


@ -2,7 +2,20 @@
Please mark all change in change log and use the ticket from JIRA.
# Milvus 0.3.0 (TBD)
# Milvus 0.3.1 (2019-07-10)
## Bug
## Improvement
## New Feature
## Task
- MS-125 - Create 0.3.1 release branch
# Milvus 0.3.0 (2019-06-30)
## Bug
- MS-104 - Fix unittest lcov execution error
@ -11,6 +24,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-89 - Fix compile failed, libgpufaiss.a link missing
- MS-90 - Fix arch match incorrect on ARM
- MS-99 - Fix compilation bug
- MS-110 - Avoid huge file size
## Improvement
- MS-82 - Update server startup welcome message
@ -19,6 +33,11 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-22 - Enhancement for MemVector size control
- MS-92 - Unify behavior of debug and release build
- MS-98 - Install all unit test to installation directory
- MS-115 - Change is_startup of metric_config switch from true to on
- MS-122 - Archive criteria config
- MS-124 - HasTable interface
- MS-126 - Add more error code
- MS-128 - Change default db path
## New Feature
@ -40,6 +59,9 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-85 - add NetIO metric
- MS-96 - add new query interface for specified files
- MS-97 - Add S3 SDK for MinIO Storage
- MS-105 - Add MySQL
- MS-130 - Add prometheus_test
- MS-143 - Integrate Knowhere but not activate
## Task
- MS-74 - Change README.md in cpp


@ -52,7 +52,7 @@ if(MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_MINOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from git branch name")
set(MILVUS_VERSION "0.3.0")
set(MILVUS_VERSION "0.3.1")
endif()
message(STATUS "Build version = ${MILVUS_VERSION}")
@ -113,20 +113,13 @@ link_directories(${MILVUS_BINARY_DIR})
set(MILVUS_ENGINE_INCLUDE ${PROJECT_SOURCE_DIR}/include)
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
#set(MILVUS_THIRD_PARTY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
#set(MILVUS_THIRD_PARTY_BUILD ${CMAKE_CURRENT_SOURCE_DIR}/third_party/build)
add_compile_definitions(PROFILER=${PROFILER})
include_directories(${MILVUS_ENGINE_INCLUDE})
include_directories(${MILVUS_ENGINE_SRC})
#include_directories(${MILVUS_THIRD_PARTY_BUILD}/include)
link_directories(${CMAKE_CURRENT_BINARY_DIR})
#link_directories(${MILVUS_THIRD_PARTY_BUILD}/lib)
#link_directories(${MILVUS_THIRD_PARTY_BUILD}/lib64)
#execute_process(COMMAND bash build.sh
# WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
add_subdirectory(src)
@ -135,12 +128,19 @@ if (BUILD_COVERAGE STREQUAL "ON")
endif()
if (BUILD_UNIT_TEST)
if (BUILD_UNIT_TEST STREQUAL "ON")
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
endif(BUILD_UNIT_TEST)
endif()
add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean)
if("${MILVUS_DB_PATH}" STREQUAL "")
set(MILVUS_DB_PATH "/tmp/milvus")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf)
#install
install(FILES
scripts/start_server.sh
@ -152,5 +152,12 @@ install(FILES
conf/log_config.conf
DESTINATION
conf)
install(FILES
./Milvus-EULA-cn.md
./Milvus-EULA-en.md
DESTINATION
license
)
config_summary()

cpp/Milvus-EULA-cn.md Normal file

@ -0,0 +1,119 @@
# **Milvus**终端用户授权许可条款及条件
#### 2019-06-30 版
本条款和条件(下称“本协议”)适用于使用由上海赜睿信息科技有限公司(下称“**ZILLIZ**”)所提供的Milvus产品(参见如下定义) 的用户。
**请仔细阅读如下条款:**
**若您(下称“您”或“用户”)代表某公司或者其他机构使用任何产品时, 您特此陈述您作为该公司或该等其他机构的员工或代理,您有权代表该公司或该等其他机构接受本协议项下所要求的全部条款和条件。**
**若使用任何产品,您知晓并同意:**
**(A)您已阅读本协议中所有的条款和条件;**
**(B)您已理解本协议中所有的条款和条件;**
**(C)您已同意本协议中所有条款和条件对您具有法律约束力。**
**如果您不同意本协议所述条款和条件中的任意内容,则可以选择不使用产品的任何部分。**
**本协议的“生效日期”是指您第一次下载任何产品的日期。**
1. **产品**,指本协议项下任何 **ZILLIZ** 的Milvus产品和软件包括 Milvus向量检索数据库Docker版与其相关的升级、更新、故障修复或修改版本统称“更新软件”。无论本协议是否另有规定
a仅Milvus向量检索数据库Docker版是免费授权用户的版本且ZILLIZ保留收回该授权的权力
b任何使用或者试用Milvus向量检索数据库Docker版的个人与组织需要通过support@zilliz.com向ZILLIZ告知个人或者组织的身份、联系方式以及使用Milvus的目的。
c制作和使用额外的副本仅限于必要的备份目的。
2. **全部协议**,本协议包括本授权许可条款及条件以及任何[Milvus官方网站](https://milvus.io)展示或者网页链接所附或引用的全部条款。本协议是双方就相关事项达成的完整协议,取代 **ZILLIZ** 与用户之间就本条款相关事项所达成的其他任何协议,无论是口头的还是书面的。
3. **使用许可****ZILLIZ** 授予用户非排他性的、不可转让的、非可再授权的、可撤回的和有限的许可进行访问和使用第1条所定义的产品该访问和使用许可仅限于用户内部使用之目的。通过电子下载或其他经许可的来源获得产品的用户均应受限于本协议的内容。
4. **许可限制**,除非本协议另有明文规定,否则用户将不被允许:
a修改、翻译或制造产品的衍生作品
b反向编译、反向工程、破解产品的任何部分或试图发现有关产品的任何源代码、基本理念或运算方法 c销售、分派、再授权、出租、出借、出质、提供或另行翻译全部或部分产品
d制造、获取非法制造的、再版或复制产品
e删除或更改与产品相关联的任何商标、标志、版权或其他专有标
f不得在没有 **ZILLIZ** 明确书面授权的情况下,使用或许可他人使用产品为第三方提供服务,无论是在产品服务过程中使用或采用分时的方式;
g引起或许可任何其他方进行上述任何一种禁止行为。
5. **所有权****ZILLIZ** 和用户在本协议项下的许可需明确,**ZILLIZ** 有以下各项的全部权利、所有权和相关利益a产品包括但不限于任何更新软件、修订版本或其衍生作品
b**ZILLIZ** 根据本协议提供任何服务的过程中或作为其提供服务的结果,由 **ZILLIZ** 发现、 产生或发展出来的所有的概念、发明、发现、改进、信息、创意作品等;
c前述各项所含的任何知识产权权利。在本协议项下“知识产权”是指在任何管辖区域经申请和注册获得认可和保护的全部专利、版权、道德权利、商标、商业秘密和任何其他形式的权利。**ZILLIZ** 与用户同意,在受限于法律法规规定及本协议全部条款和条件的前提下,用户拥有使用产品而产生的数据的权利、所有权等相关利益。本协议中无任何默示许可,**ZILLIZ** 保留本协议项下未明确授权的全部权利。除非本协议明确约定,**ZILLIZ** 在本协议下未授予用户任何许可权利,无论是通过暗示、默许或其他方式。
6. **保密**,保密信息是指,无论是在本协议生效前或生效后,由 **ZILLIZ** 披露给用户的与本协议或与 **ZILLIZ** 相关的所有信息(无论是以口头、书面或其他有形、无形的形式)。保密信息包括但不限于,商业计划的内容、产品、发明、设计图纸、财务计划、计算机程序、发明、用户信息、战略和其他类似信息。在本协议期限内,除非获得明确许可, 用户需保证保密信息的秘密性,并确保不会使用上述保密信息。用户将采用与保护其自身保密信息的同等谨慎程度(不论在何种情况下均不低于合理的谨慎程度)来保护 **ZILLIZ** 的保密信息,来避免使得保密信息被未经授权的使用和披露。保密信息只供用户根据本协议规定使用产品之目的而使用。此外,用户将:
a除非用户为了根据本协议的规定而使用产品之目的外不得以任何形式复制、使用或披露保密信息 b只向为确保用户可根据本协议使用产品而必需知道该保密信息的员工和顾问披露保密信息前提是上述员工和顾问已签署了包含保密义务不低于本条所述内容的保密协议。
保密信息不包括下列信息:
a 非因用户过错违反本协议导致已进入公共领域可被第三方获取的;
b 用户能合理证明其在通过 **ZILLIZ** 获得之前已知晓的;
c用户能证明没有使用或参考该保密信息而独立获得的
d用户从其他无披露限制或无保密义务的第三方获得的。如无另行说明由用户提供给 **ZILLIZ** 有关产品的任何建议、评论或者其他反馈(统称“反馈信息”)将同样构成保密信息。
此外,**ZILLIZ** 有权使用、披露、复制、许可和利用上述反馈信息,而无需承担任何知识产权负担或其他任何形式的义务或限制。根据相关法律法规,与本协议的履行和用户使用 **ZILLIZ** 产品相关的情况下:
a**ZILLIZ** 同意不会要求用户提供任何个人身份信息;
b用户同意不提供任何个人身份信息给 **ZILLIZ**
7. **免责声明**,用户陈述、保证及承诺如下:
a其所有员工和顾问都将遵守本协议的全部条款
b在履行本协议时将遵守全部可适用的政府部门颁发的法律、法规、规章、命令和其他要求无论是现行有效还是之后生效的
无论本协议是否另有规定,用户将持续对其雇员或顾问的全部作为或不作为承担责任,如同该等作为或不作为系其自身所为。
产品系按照原状或现状提供给用户,不含任何形式的陈述、保证、 承诺或条件。**ZILLIZ** 及其供应商不保证任何产品将无任何故障、错误或漏洞。**ZILLIZ** 和其供应商不为产品的如下内容提供任何陈述和保证(无论是明示或暗示,口头或书面),不论该内容是否依据法律之规定,行业惯例,交易习惯或其他原因而要求的:
a保证适销性
b保证可适用于任何目的不论 **ZILLIZ** 是否知晓、应当知晓、被建议或另行得知该目的);
c保证不侵权和拥有全部所有权。用户已明确知悉并同意产品上无任何陈述和保证。此外鉴于进行入侵和网络攻击的新技术在不断发展**ZILLIZ** 并不保证产品或产品所使用的系统或网络将免于任何入侵或攻击。
8. **损害赔偿**,用户应赔偿、保护或使得 **ZILLIZ** 及其董事、高管、 雇员、供应商、顾问、承包商和代理商(统称为“**ZILLIZ **受保障方”)免受所有现存或潜在的针对 **ZILLIZ** 受保障方因提起请求、诉讼或其他程序而引起的要求其赔偿损害损失、支付费用、罚款、调解、 损失费用等支出(包括但不限于律师费、费用、罚款、利息和垫付款),用户承担上述责任的前提是该请求、诉讼或其他程序,不论是否成功系在如下情况发生时导致、引起的,或以任何形式与下述情况相关:
a任何对本协议的违反包括但不限于任何违反用户陈述和保证或约定的情况
b用户过失或故意产生的过错行为
c引起争议的数据和信息系在产品的使用过程中产生或收集的。
9. **责任限制**,除了 **ZILLIZ** 存在欺诈或故意的过错行为,在任何情况下:
a**ZILLIZ** 都不会赔偿用户或任何第三方的因本协议或产品(包括用户使用或无法使用产品的情况)而遭受的任何利润损失、数 据损失、使用损失、收入损失、商誉损失、任何经营活动的中断,任何其他商业损害或损失,或任何间接的、特殊的、附带的、惩戒性、惩罚性或伴随的损失,不论上述损失系因合同、侵权、严格责任或其他原因而确认的,即使 **ZILLIZ** 已被通知或因其他可能的渠道知晓上述损失发生的可能性;
b**ZILLIZ** 因本协议所需承担的全部赔偿责任不应超过用户已支付或将支付给 **ZILLIZ** 的全部款项总额(若有),多项请求亦不得超过该金额限制。上述限制、排除情况及声明应在相关法律允许的最大范围内得以适用,即便任何补偿无法达到其实质目的。
10. **第三方供应商**,产品可能包括由第三方供应商许可提供的软件或其他代码(下称“第三方软件”)。用户已知悉第三方供应商不对产品或其任何部分提供任何陈述和保证,**ZILLIZ** 不承担因产品或用户对第三方软件的使用或不能使用的情况而产生的任何责任。
11. **诊断和报告**,用户了解并同意该产品包含诊断功能作为其默认配置。 诊断功能用于收集有关使用环境和产品使用过程中的配置文件、节点数、 软件版本、日志文档和其他信息,并将上述信息报告给 **ZILLIZ** 用于提前识别潜在的支持问题、了解用户的使用环境、并提高产品的使用性能。虽然用户可以选择更改诊断功能来禁用自动定时报告或仅用于报告服务记录,但用户需同意,每季度须至少运行一次诊断功能并将结果报告给**ZILLIZ**。
12. **终止**,本协议期限从生效之日起直到 **ZILLIZ** 网站规定的期限终止除非本协议因用户违反本协议中条款而提前终止。无论本协议是否另有规定在用户存在违反第3、4、5或7条时**ZILLIZ**有权立即终止本协议。本协议期满或提前终止时:
a根据本协议所授予给用户的所有权利将立即终止在此情况下用户应立即停止使用产品
b 用户应及时将届时仍由其占有的所有保密信息及其副本(包括但不限于产品)交还给 **ZILLIZ**,或根据 **ZILLIZ** 的自行审慎决定及指示, 销毁该等保密信息全部副本,未经 **ZILLIZ** 书面同意,用户不得擅自保留任何由 **ZILLIZ** 提供的保密信息及其副本。
13. **第三方资源** **ZILLIZ** 供应的产品可能包括对其他网站、内容或资源的超链接(下称“第三方资源”),且 **ZILLIZ** 此类产品的正常使用可能依赖于第三方资源的可用性。**ZILLIZ** 无法控制任何第三方资源。用户承认并同意,**ZILLIZ** 不就第三方资源的可用性及安全性承担任何责任,也不对该等第三方资源所涉及的或从其中获得的任何广告、产品或其他材料提供保证。用户承认并同意,**ZILLIZ** 不应因第三方资源的可用性及安全性、或用户依赖于第三方资源所涉及的或从其中获得的任何广告、产品或其他材料的完整性、准确性及存续而可能遭受的损失或损害承担任何责任。
14. **其他**,本协议全部内容均在中华人民共和国境内履行,受中华人民共和国法律管辖并根据其解释(但不适用相关冲突法的法律条款)。用 **ZILLIZ** 同意与本协议有关的任何争议将向上海市徐汇区人民法院提出,且不可撤销无条件的同意上述法院对因本协议提起的全部诉讼、争议拥有排他的管辖权。一旦确定任何条款无效、非法或无法执行, **ZILLIZ** 保留修改和解释该条款的权利。任何需要发送给用户的通知如公布在 **ZILLIZ** 的网站上则被视为已有效、合法地发送给用户。除了本合同项下应支付款项的义务外,任何一方将不对因不可抗力而导致的无法合理控制的全部或部分未能履行或延迟履行本协议的行为负责, 不可抗力包括但不限于火灾、暴风雨、洪水、地震、内乱、电信中断、 电力中断或其他基础设施的中断、**ZILLIZ** 使用的服务提供商存在问题导致服务中断或终止、罢工、故意毁坏事件、电缆被切断、病毒入侵或其他任意第三方故意或非法的行为引起的其他类似事件。在上述迟延履行情况出现时,可延迟履行协议的时间为因上述原因引起的延迟时间。 本协议另有明确规定外,本协议所要求或认可的通知或通讯均需以书面形式经一方有权代表签署或授权并以直接呈递、隔夜快递,经确认的电子邮件发送,经确认的传真或邮寄挂号信、挂号邮件保留回单等方式送达。对本协议的任何修改、补充或删除或权利放弃,必须通过书面由双方适当授权的代表签署确认后方为有效。任何一方对任何权利或救济的不履行或迟延履行(部分或全部)不构成对该等权利或救济的放弃,也不影响任何其他权利或救济。本协议项下的所有权利主张和救济均可为累积的且不排除本协议中包含的或法律所规定的其他任何权利或救济。 对本协议中任何一项违约责任的豁免或延迟行使任何权利,并不构成对其他后续违约责任的豁免。

cpp/Milvus-EULA-en.md Normal file

@ -0,0 +1,129 @@
# ZILLIZ End-User License Agreement
#### Last updated: 2019-06-30
This End-user License Agreement ("Agreement") is applicable to all users who uses Milvus provided by ZILLIZ company.
**Please read this agreement carefully before clicking the I Agree button, downloading or using this Application.**
**If you ("You" or "User") use any product on behalf of a company or other organization, you hereby state that you are an employee or agent of the company or such other institution, and you have the right to represent the company or such institutions to accept all the terms and conditions required under this Agreement. **
**If you use any product, you acknowledge and agree:**
**(A) You have read all the terms and conditions in the Agreement;**
**(B) You have understood all the terms and conditions in the Agreement;**
**(C) You have agreed that all the terms and conditions of this Agreement are legally binding on you.**
**If you do not agree to any of the terms and conditions set forth in this Agreement, you may choose not to use any part of the product.**
**This agreement takes effect immediately the first time you download the application**.
1. **Product**. In this Agreement, it refers to Milvus and other related software products of **ZILLIZ**, including Milvus vector indexing database and its updates, higher versions, maintenance or patch releases ("Updated Software").
(a) Only the Docker version of Milvus vector indexing database is granted free to the User. **ZILLIZ** retains the right to revoke this grant;
(b) Any person or organization that intend to use or try the Docker version of Milvus vector indexing database need to inform **ZILLIZ** of the personal identity, contact information and purposes of using the Product by sending an email to: support@zilliz.com;
(c) Making or using additional copies of the Product is restricted to necessary backup purposes only.
2. **Related Agreements**. The Related Agreements includes this Agreement and all other related terms and conditions that appear in [Milvus official website](https://milvus.io). This Agreement is the entire and final agreement that replaces all other terms agreed between the User and **ZILLIZ** about issues listed here, oral or written.
3. **License Grant**. **ZILLIZ** grants You a revocable, non-exclusive, non-transferable, limited right to install and use the Application defined above for your personal, non-commercial purposes. Any User who obtains the Application through downloading or other permitted channels is also subject to this Agreement;
4. **Restrictions on Use.** You shall use the Application in accordance with the terms in the Agreement, and shall not:
(a) Make any modification, translation or derivative work from the Application;
(b) Decompile, reverse engineer, disassemble, or attempt to derive the source code or algorithm of the Application;
(c) Sell, distribute, sub-license or provide translation of the whole or part of the Application;
(d) Use the Application for creating a product, service or software;
(e) Remove, alter or obscure any proprietary notice, trademark, or copyright of the Company and Application;
(f) Install or use the Application to provide services to third-party partners without acquiring a formal grant from **ZILLIZ**;
(g) Perform or permit any behavior that might lead to one of the above prohibited actions.
5. **Ownership**. **ZILLIZ** enjoys the ownership of the following
(a) Products (including but not restricted to any updated software, patch releases, or derivative products);
(b) All concepts, innovations, discoveries, improvements, information, or creative products developed and discovered by **ZILLIZ** as a result of or arising out of the service providing process;
(c) Intellectual property rights of the above-mentioned products and innovations. In this Agreement, "Intellectual Property" refers to trademarks, patents, designations of origin, industrial designs and models and copyright. **ZILLIZ** and the User agree that the User enjoys all the rights to use data produced by using the Product, while **ZILLIZ** keeps all other rights not explicitly stated in the Agreement. Unless otherwise stated, **ZILLIZ** has not granted any additional rights to Users, whether implied, acquiesced or in other ways.
6. **Non-disclosure**. Confidential Information refers to any and all information revealed to the User by **ZILLIZ**, either oral or written, tangible or intangible, before or after the Agreement takes effect. Confidential information includes but is not restricted to business plans and strategies, product, innovations, design papers, financial plans, computer programs, User information, etc. Within the term of this Agreement, unless granted definite permission, the User shall hold and maintain the Confidential Information in strictest confidence for the sole and exclusive benefit of **ZILLIZ** and using the Product. In addition:
(a) You shall not copy, use or disclose Confidential Information for purposes other than using the Product as agreed in this Agreement;
(b) You shall carefully restrict access to Confidential Information to employees, contractors, and third parties as is reasonably required, and shall require those persons to sign nondisclosure restrictions at least as protective as those in this Agreement.
Confidential Information does not include:
(a) Information that can be obtained by third parties not due to the User's violation of the Agreement;
(b) Information that can be proven to have been provided to the User by sources other than **ZILLIZ**;
(c) Information that is obtained with no reference to the Confidential Information;
(d) Information the User gets from third parties that are not subject to a non-disclosure agreement. Unless otherwise stated, any comments, suggestions or other feedback ("Feedback Information") about the Product provided by the User to **ZILLIZ** will also be treated as Confidential Information.
Furthermore, **ZILLIZ** has the right to use, disclose, copy or use above Feedback Information, and bearing no intellectual property burden or restrictions. According to related laws and regulations, during the fulfillment of this Agreement:
(a) **ZILLIZ** agrees not to require the User to provide any information regarding personal identities;
(b) The User agrees not to provide **ZILLIZ** with any personal information.
7. **Disclaimer of Warranties**. You acknowledge, agree and promise that:
(a) All employees and consultants will obey all terms in the Agreement;
(b) Application of the Agreement is subject to all laws, regulations, acts, orders and other requirements issued by the government (whether these laws are in effect now or become effective in the future).
The User shall be held responsible for all behaviors in relation to the Application.
The Application is provided on an "As is" or "As available" basis, and Your use of or reliance on the Application is at Your sole risk and discretion. **ZILLIZ** and its partners make no warranty that the Application will meet all Your requirements and expectations.
**ZILLIZ** and its suppliers hereby disclaim any and all representations, warranties and guaranties regarding the Application, whether expressed, implied or statutory:
(a) The implied warranty of merchantability;
(b) Fitness for a particular purpose;
(c) Non-infringement.
Furthermore, considering the continuous advancement of Internet hacking and attacking technologies, **ZILLIZ** makes no guarantee that the Application or the systems and networks it uses will be exempt from any hack or attack.
8. **Damages and Penalties**. The User shall pay, protect or prevent **ZILLIZ** and its board members, executives, employees, consultants or representative agencies (**ZILLIZ** Protected Party) from any existing or potential damage loss, fees, penalties and other outgoing payments (include but are not limited to lawyer fees, fines, interests and advance payment) arising out of legal request, litigation or other processes. The prerequisite condition of the above obligations are that the legal request, litigation or process are caused by any of the following situations:
(a) Any violation of the Agreement;
(b) User fault or deliberate misconduct;
(c) Controversial data is produced or collected during the usage of the Product.
9. **Limitation of Liability**. Unless due to deliberate fraud or error from **ZILLIZ**, below terms are applicable:
(a) Under no circumstances shall **ZILLIZ** be held liable for any profit loss, data loss, revenue loss, termination of operations, or any indirect, special, exemplary or consequential damages arising out of or in connection with Your access to or use of the Application;
(b) Without limiting the generality of the foregoing, **ZILLIZ**'s aggregate liability to You shall not exceed the total amount of money You have already paid or will pay to **ZILLIZ** (if any).
10. **Third-party Suppliers**. The User acknowledges that no statement or guarantee should be expected from third-party suppliers about the Product or its components. **ZILLIZ** holds no obligations for the User's usage of the software provided by third-party suppliers.
11. **Diagnosis and Report**. The User know and agree that Diagnosis is part of the configuration of the Product. Diagnosis is used to collect the configuration files, node numbers, software version, logs and related information, and send a Report to **ZILLIZ** to recognize potential support problems, get to know User environment, and to enhance product features. Although You can choose to turn off the Diagnosis function of automatic report sending, however, You shall run the Diagnosis at least once every quarter and send the Report to **ZILLIZ**.
12. **Termination of Licensing**. This Agreement is valid from the day it takes effect to the termination dated defined in **ZILLIZ** website, unless the User has disobeyed the terms and caused the Agreement to end in advance. Whether or not listed, if the User has violated terms in Clause 3, 4, 5 or 7, **ZILLIZ** may, in its sole and absolute discretion, terminate this License and the rights afforded to You. Upon the expiration or termination of the License:
(a) All rights afforded to the User based upon this Agreement will be terminated. You shall cease use of the Product and uninstall related software;
(b) The User shall return all confidential information and copies thereof (including but not restricted to the Product) back to **ZILLIZ**, or destroy all copies of the confidential information with the permission of **ZILLIZ**. Without the written approval of **ZILLIZ**, the User is not allowed to keep any confidential information or copies thereof provided by **ZILLIZ**.
13. **Third-party Resources**. Products supplied by **ZILLIZ** may include hyperlinks to other websites, content or resources ("Third Party Resources"), and the normal use of such products may depend on the availability of third party resources. **ZILLIZ** is unable to control any third-party resources. The User acknowledges and agrees that **ZILLIZ** is not responsible for the availability and security of third-party resources and does not guarantee any advertising, products or other materials that are or are derived from such third party resources. The User acknowledges and agrees that **ZILLIZ** shall not hold obligations about any liability for loss or damage that may be suffered due to the availability and security of third party resources, or the integrity or accuracy of any advertisements, products or other materials that the User relies on or obtains from third party resources.
14. **Other**. The entire contents of this Agreement are performed within the territory of the People's Republic of China and are governed by and construed in accordance with the laws of the People's Republic of China (but not applicable to the relevant conflict laws). **ZILLIZ** agrees that any disputes relating to this Agreement will be submitted to the Xuhui District People's Court of Shanghai, and irrevocably and unconditionally agree that the above courts have exclusive jurisdiction over all litigations and disputes brought about by this Agreement. Once it is determined that any provision is invalid, illegal or unenforceable, **ZILLIZ** reserves the right to modify and interpret the terms. Any notice that needs to be sent to the user, if posted on the **ZILLIZ** website, is deemed to have been validly and legally sent to the user. Except for the obligation to pay under this contract, neither party will be liable for failure to perform or delayed performance of this Agreement in whole or in part due to force majeure. The force majeure includes but is not limited to fire, storm, flood , earthquake, civil strife, telecommunications disruption, power outage or other infrastructure disruption, service interruption or termination caused by **ZILLIZ** service provider problems, strikes, intentional destruction events, cable cuts, virus intrusion or any other similar incidents caused by intentional or illegal acts by third parties. In the case of the above-mentioned delayed performance, the delay in fulfilling the agreement may be the delay time due to the above reasons. Unless otherwise stated in this Agreement, notices or communications required or endorsed by this Agreement must be signed or authorized in writing by a party, and delivered by direct delivery, overnight delivery, confirmed email, confirmed fax or by mailing a registered letter, registered mail, and returning the order, etc. Any modification, addition or deletion or waiver of this Agreement must be confirmed by a written confirmation by a suitably authorized representative of both parties. The non-performance or delay in the performance of any right or remedy by any party (partially or wholly) does not constitute a waiver of such rights or remedies, nor does it affect any other rights or remedies. All claims and remedies under this Agreement may be cumulative and do not exclude any other rights or remedies contained in this Agreement or as required by law. Exemption from the waiver or delay of any liability for breach of contract in this Agreement does not constitute an exemption from other subsequent breach of contract obligations.


@ -1,13 +1,22 @@
### Compilation
#### Step 1: install necessary tools
Install MySQL
centos7 :
yum install gfortran flex bison
yum install gfortran qt4 flex bison mysql-devel
ubuntu16.04 :
sudo apt-get install gfortran flex bison
sudo apt-get install gfortran qt4-qmake flex bison libmysqlclient-dev
If `libmysqlclient_r.so` does not exist after installing MySQL Development Files, you need to create a symbolic link:
```
sudo ln -s /path/to/libmysqlclient.so /path/to/libmysqlclient_r.so
```
#### Step 2: build (output to cmake_build folder)
cmake_build/src/milvus_server is the server
cmake_build/src/libmilvus_engine.a is the static library
@ -39,9 +48,20 @@ If you encounter the following error when building:
or
./build.sh --unittest
#### To run code coverage
apt-get install lcov
./build.sh -u -c
### Launch server
Set config in cpp/conf/server_config.yaml
Add milvus/bin/lib to LD_LIBRARY_PATH
```
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/milvus/bin/lib
```
Then launch server with config:
cd [build output path]
start_server.sh


@ -1,13 +1,14 @@
#!/bin/bash
BUILD_TYPE="Debug"
BUILD_UNITTEST="off"
BUILD_UNITTEST="OFF"
LICENSE_CHECK="OFF"
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
while getopts "p:t:uhlrc" arg
while getopts "p:d:t:uhlrc" arg
do
case $arg in
t)
@ -15,11 +16,14 @@ do
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="on";
BUILD_UNITTEST="ON";
;;
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
l)
LICENSE_CHECK="ON"
;;
@ -36,12 +40,13 @@ do
echo "
parameter:
-t: build type
-u: building unit test options
-p: install prefix
-l: build license version
-r: remove previous build directory
-c: code coverage
-t: build type(default: Debug)
-u: building unit test options(default: OFF)
-p: install prefix(default: $(pwd)/milvus)
-d: db path(default: /opt/milvus)
-l: build license version(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
usage:
./build.sh -t \${BUILD_TYPE} [-u] [-h] [-g] [-r] [-c]
@ -71,6 +76,7 @@ if [[ ${MAKE_CLEAN} == "ON" ]]; then
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DCMAKE_LICENSE_CHECK=${LICENSE_CHECK} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
$@ ../"
echo ${CMAKE_CMD}


@ -93,6 +93,8 @@ define_option(MILVUS_WITH_SQLITE "Build with SQLite library" ON)
define_option(MILVUS_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)
define_option(MILVUS_WITH_MYSQLPP "Build with MySQL++" ON)
define_option(MILVUS_WITH_THRIFT "Build with Apache Thrift library" ON)
define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)


@ -27,6 +27,7 @@ set(MILVUS_THIRDPARTY_DEPENDENCIES
JSONCONS
LAPACK
Lz4
MySQLPP
OpenBLAS
Prometheus
RocksDB
@ -57,14 +58,16 @@ macro(build_dependency DEPENDENCY_NAME)
build_easyloggingpp()
elseif("${DEPENDENCY_NAME}" STREQUAL "FAISS")
build_faiss()
elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
build_gtest()
elseif("${DEPENDENCY_NAME}" STREQUAL "LAPACK")
build_lapack()
elseif("${DEPENDENCY_NAME}" STREQUAL "Knowhere")
build_knowhere()
elseif("${DEPENDENCY_NAME}" STREQUAL "Lz4")
build_lz4()
elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
build_gtest()
elseif ("${DEPENDENCY_NAME}" STREQUAL "MySQLPP")
build_mysqlpp()
elseif ("${DEPENDENCY_NAME}" STREQUAL "JSONCONS")
build_jsoncons()
elseif ("${DEPENDENCY_NAME}" STREQUAL "OpenBLAS")
@ -274,6 +277,12 @@ else()
set(LZ4_SOURCE_URL "https://github.com/lz4/lz4/archive/${LZ4_VERSION}.tar.gz")
endif()
if(DEFINED ENV{MILVUS_MYSQLPP_URL})
set(MYSQLPP_SOURCE_URL "$ENV{MILVUS_MYSQLPP_URL}")
else()
set(MYSQLPP_SOURCE_URL "https://tangentsoft.com/mysqlpp/releases/mysql++-${MYSQLPP_VERSION}.tar.gz")
endif()
if (DEFINED ENV{MILVUS_OPENBLAS_URL})
set(OPENBLAS_SOURCE_URL "$ENV{MILVUS_OPENBLAS_URL}")
else ()
@ -886,8 +895,8 @@ macro(build_faiss)
# ${MAKE} ${MAKE_BUILD_ARGS}
BUILD_COMMAND
${MAKE} ${MAKE_BUILD_ARGS} all
COMMAND
cd gpu && make ${MAKE_BUILD_ARGS}
COMMAND
cd gpu && ${MAKE} ${MAKE_BUILD_ARGS}
BUILD_IN_SOURCE
1
# INSTALL_DIR
@ -1125,6 +1134,65 @@ if(MILVUS_WITH_LZ4)
include_directories(SYSTEM ${LZ4_INCLUDE_DIR})
endif()
# ----------------------------------------------------------------------
# MySQL++
macro(build_mysqlpp)
message(STATUS "Building MySQL++-${MYSQLPP_VERSION} from source")
set(MYSQLPP_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep")
set(MYSQLPP_INCLUDE_DIR "${MYSQLPP_PREFIX}/include")
set(MYSQLPP_SHARED_LIB
"${MYSQLPP_PREFIX}/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}")
set(MYSQLPP_CONFIGURE_ARGS
"--prefix=${MYSQLPP_PREFIX}"
"--enable-thread-check"
"CFLAGS=${EP_C_FLAGS}"
"CXXFLAGS=${EP_CXX_FLAGS}"
"LDFLAGS=-pthread")
externalproject_add(mysqlpp_ep
URL
${MYSQLPP_SOURCE_URL}
# GIT_REPOSITORY
# ${MYSQLPP_SOURCE_URL}
# GIT_TAG
# ${MYSQLPP_VERSION}
# GIT_SHALLOW
# TRUE
${EP_LOG_OPTIONS}
CONFIGURE_COMMAND
# "./bootstrap"
# COMMAND
"./configure"
${MYSQLPP_CONFIGURE_ARGS}
BUILD_COMMAND
${MAKE} ${MAKE_BUILD_ARGS}
BUILD_IN_SOURCE
1
BUILD_BYPRODUCTS
${MYSQLPP_SHARED_LIB})
file(MAKE_DIRECTORY "${MYSQLPP_INCLUDE_DIR}")
add_library(mysqlpp SHARED IMPORTED)
set_target_properties(
mysqlpp
PROPERTIES
IMPORTED_LOCATION "${MYSQLPP_SHARED_LIB}"
INTERFACE_INCLUDE_DIRECTORIES "${MYSQLPP_INCLUDE_DIR}")
add_dependencies(mysqlpp mysqlpp_ep)
endmacro()
if(MILVUS_WITH_MYSQLPP)
resolve_dependency(MySQLPP)
get_target_property(MYSQLPP_INCLUDE_DIR mysqlpp INTERFACE_INCLUDE_DIRECTORIES)
include_directories(SYSTEM "${MYSQLPP_INCLUDE_DIR}")
link_directories(SYSTEM ${MYSQLPP_PREFIX}/lib)
endif()
# ----------------------------------------------------------------------
# Prometheus


@ -0,0 +1,27 @@
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-global.log"
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = false
SUBSECOND_PRECISION = 3
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
* DEBUG:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-debug.log"
ENABLED = true
* WARNING:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-warning.log"
* TRACE:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-trace.log"
* VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
TO_FILE = false
TO_STANDARD_OUTPUT = false
## Error logs
* ERROR:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-error.log"
* FATAL:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%H:%m}-fatal.log"


@ -0,0 +1,33 @@
server_config:
address: 0.0.0.0
port: 19530 # the port milvus listens on, default: 19530, range: 1025 ~ 65534
gpu_index: 0 # the gpu milvus uses, default: 0, range: 0 ~ gpu number - 1
mode: single # milvus deployment type: single, cluster
db_config:
db_path: @MILVUS_DB_PATH@ # milvus data storage path
# URI format: dialect://username:password@host:port/database
# All parts except dialect are optional, but you MUST include the delimiters
# Currently dialect supports mysql or sqlite
db_backend_url: sqlite://:@:/
index_building_threshold: 1024 # index building trigger threshold, default: 1024, unit: MB
archive_disk_threshold: 512 # trigger archive action if storage size exceeds this value, unit: GB
archive_days_threshold: 30 # files older than x days will be archived, unit: day
metric_config:
is_startup: off # whether to start monitoring: on, off
collector: prometheus # metrics collector: prometheus
prometheus_config: # prometheus configuration
collect_type: pull # how prometheus collects data
port: 8080 # the port prometheus uses to fetch metrics
push_gateway_ip_address: 127.0.0.1 # push mode: push gateway ip address
push_gateway_port: 9091 # push mode: push gateway port
license_config: # license configuration
license_path: "@MILVUS_DB_PATH@/system.license" # license file path
cache_config: # cache configuration
cpu_cache_capacity: 16 # how much memory is used as cache, unit: GB, range: 0 ~ less than total memory


@ -1,27 +0,0 @@
server_config:
address: 0.0.0.0
port: 19530
transfer_protocol: binary #optional: binary, compact, json
server_mode: thread_pool #optional: simple, thread_pool
gpu_index: 0 #which gpu to be used
mode: single #optional: single, cluster
db_config:
db_path: /tmp/milvus
db_backend_url: http://127.0.0.1
index_building_threshold: 1024 #build index file when raw data file size larger than this value, unit: MB
metric_config:
is_startup: true # true is on, false is off
collector: prometheus # prometheus, now we only have prometheus
prometheus_config:
collect_type: pull # pull means prometheus pull the message from server, push means server push metric to push gateway
port: 8080
push_gateway_ip_address: 127.0.0.1
push_gateway_port: 9091
license_config:
license_path: "/tmp/system.license"
cache_config:
cpu_cache_capacity: 16 # memory pool to hold index data, unit: GB


@ -1,5 +1,7 @@
#!/bin/bash
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/milvus/lib
LCOV_CMD="lcov"
LCOV_GEN_CMD="genhtml"
@ -12,6 +14,26 @@ DIR_LCOV_OUTPUT="lcov_out"
DIR_GCNO="cmake_build"
DIR_UNITTEST="milvus/bin"
MYSQL_USER_NAME=root
MYSQL_PASSWORD=Fantast1c
MYSQL_HOST='192.168.1.194'
MYSQL_PORT='3306'
MYSQL_DB_NAME=milvus_`date +%s%N`
function mysql_exc()
{
cmd=$1
mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
if [ $? -ne 0 ]; then
echo "mysql $cmd run failed"
fi
}
mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
# get baseline
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
@ -21,16 +43,24 @@ fi
for test in `ls ${DIR_UNITTEST}`; do
echo $test
case ${test} in
case ${test} in
db_test)
# set run args for db_test
args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
;;
*_test)
# run unittest
./${DIR_UNITTEST}/${test}
if [ $? -ne 0 ]; then
echo ${DIR_UNITTEST}/${test} "run failed"
fi
args=""
;;
esac
# run unittest
./${DIR_UNITTEST}/${test} "${args}"
if [ $? -ne 0 ]; then
echo ${DIR_UNITTEST}/${test} "run failed"
fi
done
mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"
# gen test coverage
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage
@ -43,4 +73,4 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"*/cmake_build/*_ep-prefix/*" \
# gen html report
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/


@ -64,6 +64,7 @@ set(s3_client_files
include_directories(/usr/include)
include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include")
include_directories(thrift/gen-cpp)
include_directories(/usr/include/mysql)
set(third_party_libs
knowhere
@ -91,6 +92,7 @@ set(third_party_libs
zstd
cudart
cublas
mysqlpp
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
if (MEGASEARCH_WITH_ARROW STREQUAL "ON")
@ -190,4 +192,10 @@ endif ()
install(TARGETS milvus_server DESTINATION bin)
add_subdirectory(sdk)
install(FILES
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3.2.4
DESTINATION lib) #need to copy libmysqlpp.so
#add_subdirectory(sdk)


@ -7,11 +7,13 @@
#include "DBMetaImpl.h"
#include "Log.h"
#include "EngineFactory.h"
#include "Factories.h"
#include "metrics/Metrics.h"
#include "scheduler/TaskScheduler.h"
#include "scheduler/context/SearchContext.h"
#include "scheduler/context/DeleteContext.h"
#include "utils/TimeRecorder.h"
#include "MetaConsts.h"
#include <assert.h>
#include <chrono>
@ -27,9 +29,9 @@ namespace engine {
namespace {
static constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
static constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
static constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
void CollectInsertMetrics(double total_time, size_t n, bool succeed) {
double avg_time = total_time / n;
@ -76,67 +78,20 @@ void CollectFileMetrics(int file_type, size_t file_size, double total_time) {
}
}
}
void CalcScore(uint64_t vector_count,
const float *vectors_data,
uint64_t dimension,
const SearchContext::ResultSet &result_src,
SearchContext::ResultSet &result_target) {
result_target.clear();
if(result_src.empty()){
return;
}
server::TimeRecorder rc("Calculate Score");
int vec_index = 0;
for(auto& result : result_src) {
const float * vec_data = vectors_data + vec_index*dimension;
double vec_len = 0;
for(uint64_t i = 0; i < dimension; i++) {
vec_len += vec_data[i]*vec_data[i];
}
vec_index++;
double max_score = 0.0;
for(auto& pair : result) {
if(max_score < pair.second) {
max_score = pair.second;
}
}
//makesure socre is less than 100
if(max_score > vec_len) {
vec_len = max_score;
}
//avoid divided by zero
static constexpr double TOLERANCE = std::numeric_limits<float>::epsilon();
if(vec_len < TOLERANCE) {
vec_len = TOLERANCE;
}
SearchContext::Id2ScoreMap score_array;
double vec_len_inverse = 1.0/vec_len;
for(auto& pair : result) {
score_array.push_back(std::make_pair(pair.first, (1 - pair.second*vec_len_inverse)*100.0));
}
result_target.emplace_back(score_array);
}
rc.Elapse("totally cost");
}
}
DBImpl::DBImpl(const Options& options)
: options_(options),
shutting_down_(false),
meta_ptr_(new meta::DBMetaImpl(options_.meta)),
mem_mgr_(new MemManager(meta_ptr_, options_)),
compact_thread_pool_(1, 1),
index_thread_pool_(1, 1) {
StartTimerTasks();
meta_ptr_ = DBMetaImplFactory::Build(options.meta, options.mode);
mem_mgr_ = std::make_shared<MemManager>(meta_ptr_, options_);
// mem_mgr_ = (MemManagerPtr)(new MemManager(meta_ptr_, options_));
if (options.mode != Options::MODE::READ_ONLY) {
StartTimerTasks();
}
}
Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
@ -203,10 +158,6 @@ Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq,
Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
#if 0
return QuerySync(table_id, k, nq, vectors, dates, results);
#else
//get all table files from table
meta::DatePartionedTableFilesSchema files;
auto status = meta_ptr_->FilesToSearch(table_id, dates, files);
@ -220,7 +171,6 @@ Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
}
return QueryAsync(table_id, file_id_array, k, nq, vectors, dates, results);
#endif
}
Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids,
@ -232,7 +182,7 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
std::string::size_type sz;
ids.push_back(std::stol(id, &sz));
ids.push_back(std::stoul(id, &sz));
}
meta::TableFilesSchema files_array;
@ -248,145 +198,6 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
return QueryAsync(table_id, files_array, k, nq, vectors, dates, results);
}
Status DBImpl::QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
meta::DatePartionedTableFilesSchema files;
auto status = meta_ptr_->FilesToSearch(table_id, dates, files);
if (!status.ok()) { return status; }
ENGINE_LOG_DEBUG << "Search DateT Size = " << files.size();
meta::TableFilesSchema index_files;
meta::TableFilesSchema raw_files;
for (auto &day_files : files) {
for (auto &file : day_files.second) {
file.file_type_ == meta::TableFileSchema::INDEX ?
index_files.push_back(file) : raw_files.push_back(file);
}
}
int dim = 0;
if (!index_files.empty()) {
dim = index_files[0].dimension_;
} else if (!raw_files.empty()) {
dim = raw_files[0].dimension_;
} else {
ENGINE_LOG_DEBUG << "no files to search";
return Status::OK();
}
{
// [{ids, distence}, ...]
using SearchResult = std::pair<std::vector<long>, std::vector<float>>;
std::vector<SearchResult> batchresult(nq); // allocate nq cells.
auto cluster = [&](long *nns, float *dis, const int& k) -> void {
for (int i = 0; i < nq; ++i) {
auto f_begin = batchresult[i].first.cbegin();
auto s_begin = batchresult[i].second.cbegin();
batchresult[i].first.insert(f_begin, nns + i * k, nns + i * k + k);
batchresult[i].second.insert(s_begin, dis + i * k, dis + i * k + k);
}
};
// Allocate Memory
float *output_distence;
long *output_ids;
output_distence = (float *) malloc(k * nq * sizeof(float));
output_ids = (long *) malloc(k * nq * sizeof(long));
memset(output_distence, 0, k * nq * sizeof(float));
memset(output_ids, 0, k * nq * sizeof(long));
long search_set_size = 0;
auto search_in_index = [&](meta::TableFilesSchema& file_vec) -> void {
for (auto &file : file_vec) {
ExecutionEnginePtr index = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_);
index->Load();
auto file_size = index->PhysicalSize();
search_set_size += file_size;
ENGINE_LOG_DEBUG << "Search file_type " << file.file_type_ << " Of Size: "
<< file_size/(1024*1024) << " M";
int inner_k = index->Count() < k ? index->Count() : k;
auto start_time = METRICS_NOW_TIME;
index->Search(nq, vectors, inner_k, output_distence, output_ids);
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
CollectFileMetrics(file.file_type_, file_size, total_time);
cluster(output_ids, output_distence, inner_k); // cluster to each query
memset(output_distence, 0, k * nq * sizeof(float));
memset(output_ids, 0, k * nq * sizeof(long));
}
};
auto topk_cpu = [](const std::vector<float> &input_data,
const int &k,
float *output_distence,
long *output_ids) -> void {
std::map<float, std::vector<int>> inverted_table;
for (int i = 0; i < input_data.size(); ++i) {
if (inverted_table.count(input_data[i]) == 1) {
auto& ori_vec = inverted_table[input_data[i]];
ori_vec.push_back(i);
}
else {
inverted_table[input_data[i]] = std::vector<int>{i};
}
}
int count = 0;
for (auto &item : inverted_table){
if (count == k) break;
for (auto &id : item.second){
output_distence[count] = item.first;
output_ids[count] = id;
if (++count == k) break;
}
}
};
auto cluster_topk = [&]() -> void {
QueryResult res;
for (auto &result_pair : batchresult) {
auto &dis = result_pair.second;
auto &nns = result_pair.first;
topk_cpu(dis, k, output_distence, output_ids);
int inner_k = dis.size() < k ? dis.size() : k;
for (int i = 0; i < inner_k; ++i) {
res.emplace_back(std::make_pair(nns[output_ids[i]], output_distence[i])); // mapping
}
results.push_back(res); // append to result list
res.clear();
memset(output_distence, 0, k * nq * sizeof(float));
memset(output_ids, 0, k * nq * sizeof(long));
}
};
search_in_index(raw_files);
search_in_index(index_files);
ENGINE_LOG_DEBUG << "Search Overall Set Size = " << search_set_size << " M";
cluster_topk();
free(output_distence);
free(output_ids);
}
if (results.empty()) {
return Status::NotFound("Group " + table_id + ", search result not found!");
}
QueryResults temp_results;
CalcScore(nq, vectors, dim, results, temp_results);
results.swap(temp_results);
return Status::OK();
}
Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results) {
@ -405,13 +216,8 @@ Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSch
context->WaitResult();
//step 3: construct results, calculate score between 0 ~ 100
auto& context_result = context->GetResult();
meta::TableSchema table_schema;
table_schema.table_id_ = table_id;
meta_ptr_->DescribeTable(table_schema);
CalcScore(context->nq(), context->vectors(), table_schema.dimension_, context_result, results);
//step 3: construct results
results = context->GetResult();
return Status::OK();
}
@ -465,14 +271,19 @@ void DBImpl::StartMetricTask() {
}
void DBImpl::StartCompactionTask() {
// static int count = 0;
// count++;
// std::cout << "StartCompactionTask: " << count << std::endl;
// std::cout << "c: " << count++ << std::endl;
static uint64_t compact_clock_tick = 0;
compact_clock_tick++;
if(compact_clock_tick%COMPACT_ACTION_INTERVAL != 0) {
// std::cout << "c r: " << count++ << std::endl;
return;
}
//serialize memory data
std::vector<std::string> temp_table_ids;
std::set<std::string> temp_table_ids;
mem_mgr_->Serialize(temp_table_ids);
for(auto& id : temp_table_ids) {
compact_table_ids_.insert(id);
@ -543,7 +354,8 @@ Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
ENGINE_LOG_DEBUG << "New merged file " << table_file.file_id_ <<
" of size=" << index->PhysicalSize()/(1024*1024) << " M";
index->Cache();
//currently disabled to avoid excessive memory usage
//index->Cache();
return status;
}
@ -573,8 +385,12 @@ Status DBImpl::BackgroundMergeFiles(const std::string& table_id) {
}
void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
// static int b_count = 0;
// b_count++;
// std::cout << "BackgroundCompaction: " << b_count << std::endl;
Status status;
for (auto table_id : table_ids) {
for (auto& table_id : table_ids) {
status = BackgroundMergeFiles(table_id);
if (!status.ok()) {
bg_error_ = status;
@ -583,7 +399,13 @@ void DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
}
meta_ptr_->Archive();
meta_ptr_->CleanUpFilesWithTTL(1);
int ttl = 1;
if (options_.mode == Options::MODE::CLUSTER) {
ttl = meta::D_SEC;
// ENGINE_LOG_DEBUG << "Server mode is cluster. Clean up files with ttl = " << std::to_string(ttl) << "seconds.";
}
meta_ptr_->CleanUpFilesWithTTL(ttl);
}
void DBImpl::StartBuildIndexTask() {
@ -659,7 +481,8 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
<< index->PhysicalSize()/(1024*1024) << " M"
<< " from file " << to_remove.file_id_;
index->Cache();
//currently disabled to avoid excessive memory usage
//index->Cache();
} catch (std::exception& ex) {
return Status::Error("Build index encounter exception", ex.what());
@ -698,7 +521,7 @@ Status DBImpl::Size(uint64_t& result) {
DBImpl::~DBImpl() {
shutting_down_.store(true, std::memory_order_release);
bg_timer_thread_.join();
std::vector<std::string> ids;
std::set<std::string> ids;
mem_mgr_->Serialize(ids);
}


@ -17,6 +17,8 @@
#include <thread>
#include <list>
#include <set>
#include "scheduler/context/SearchContext.h"
namespace zilliz {
namespace milvus {
@ -25,49 +27,72 @@ namespace engine {
class Env;
namespace meta {
class Meta;
class Meta;
}
class DBImpl : public DB {
public:
public:
using MetaPtr = meta::Meta::Ptr;
using MemManagerPtr = typename MemManager::Ptr;
DBImpl(const Options& options);
explicit DBImpl(const Options &options);
virtual Status CreateTable(meta::TableSchema& table_schema) override;
virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
virtual Status DescribeTable(meta::TableSchema& table_schema) override;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
Status
CreateTable(meta::TableSchema &table_schema) override;
virtual Status InsertVectors(const std::string& table_id,
uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
Status
DeleteTable(const std::string &table_id, const meta::DatesT &dates) override;
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, QueryResults& results) override;
Status
DescribeTable(meta::TableSchema &table_schema) override;
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results) override;
Status
HasTable(const std::string &table_id, bool &has_or_not) override;
virtual Status Query(const std::string& table_id, const std::vector<std::string>& file_ids,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results) override;
Status
AllTables(std::vector<meta::TableSchema> &table_schema_array) override;
virtual Status DropAll() override;
Status
GetTableRowCount(const std::string &table_id, uint64_t &row_count) override;
virtual Status Size(uint64_t& result) override;
Status
InsertVectors(const std::string &table_id, uint64_t n, const float *vectors, IDNumbers &vector_ids) override;
virtual ~DBImpl();
Status
Query(const std::string &table_id, uint64_t k, uint64_t nq, const float *vectors, QueryResults &results) override;
private:
Status QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
const float* vectors, const meta::DatesT& dates, QueryResults& results);
Status
Query(const std::string &table_id,
uint64_t k,
uint64_t nq,
const float *vectors,
const meta::DatesT &dates,
QueryResults &results) override;
Status QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files,
uint64_t k, uint64_t nq, const float* vectors,
const meta::DatesT& dates, QueryResults& results);
Status
Query(const std::string &table_id,
const std::vector<std::string> &file_ids,
uint64_t k,
uint64_t nq,
const float *vectors,
const meta::DatesT &dates,
QueryResults &results) override;
Status DropAll() override;
Status Size(uint64_t &result) override;
~DBImpl() override;
private:
Status
QueryAsync(const std::string &table_id,
const meta::TableFilesSchema &files,
uint64_t k,
uint64_t nq,
const float *vectors,
const meta::DatesT &dates,
QueryResults &results);
void StartTimerTasks();
@ -76,15 +101,19 @@ private:
void StartMetricTask();
void StartCompactionTask();
Status MergeFiles(const std::string& table_id,
const meta::DateT& date,
const meta::TableFilesSchema& files);
Status BackgroundMergeFiles(const std::string& table_id);
Status MergeFiles(const std::string &table_id,
const meta::DateT &date,
const meta::TableFilesSchema &files);
Status BackgroundMergeFiles(const std::string &table_id);
void BackgroundCompaction(std::set<std::string> table_ids);
void StartBuildIndexTask();
void BackgroundBuildIndex();
Status BuildIndex(const meta::TableFileSchema&);
Status
BuildIndex(const meta::TableFileSchema &);
private:
const Options options_;


@ -183,6 +183,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
}
Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
try {
MetricCollector metric;
@ -192,9 +193,11 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
auto table = ConnectorPtr->select(columns(&TableSchema::state_),
where(c(&TableSchema::table_id_) == table_schema.table_id_));
if (table.size() == 1) {
std::string msg = (TableSchema::TO_DELETE == std::get<0>(table[0])) ?
"Table already exists and it is in delete state, please wait a second" : "Table already exists";
return Status::Error(msg);
if(TableSchema::TO_DELETE == std::get<0>(table[0])) {
return Status::Error("Table already exists and it is in delete state, please wait a second");
} else {
return Status::OK();//table already exists, no error
}
}
}
@ -328,7 +331,7 @@ Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
}
} catch (std::exception &e) {
HandleException("Encounter exception when lookup table", e);
return HandleException("Encounter exception when lookup table", e);
}
return Status::OK();
@ -358,7 +361,7 @@ Status DBMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
}
} catch (std::exception &e) {
HandleException("Encounter exception when lookup all tables", e);
return HandleException("Encounter exception when lookup all tables", e);
}
return Status::OK();
@ -655,7 +658,7 @@ Status DBMetaImpl::Archive() {
for (auto kv : criterias) {
auto &criteria = kv.first;
auto &limit = kv.second;
if (criteria == "days") {
if (criteria == engine::ARCHIVE_CONF_DAYS) {
long usecs = limit * D_SEC * US_PS;
long now = utils::GetMicroSecTimeStamp();
try {
@ -671,11 +674,11 @@ Status DBMetaImpl::Archive() {
return HandleException("Encounter exception when update table files", e);
}
}
if (criteria == "disk") {
if (criteria == engine::ARCHIVE_CONF_DISK) {
uint64_t sum = 0;
Size(sum);
auto to_delete = (sum - limit * G);
int64_t to_delete = (int64_t)sum - limit * G;
DiscardFiles(to_delete);
}
}
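The `(int64_t)` cast in the new `to_delete` line is the substance of this change: `sum` is a `uint64_t`, so the old `sum - limit * G` wrapped around to an enormous positive value whenever current usage was below the disk threshold, and `DiscardFiles` would then be asked to discard far more than exists. A minimal standalone sketch of the difference (the 100 GB usage, 512 GB limit and the local `G` constant are assumed values for illustration only, not taken from the code):

```cpp
#include <cstdint>
#include <iostream>

int main() {
    const int64_t G = 1024LL * 1024 * 1024;  // bytes per GB; stands in for the meta constant
    uint64_t sum = 100 * G;                  // assumed current storage usage: 100 GB
    int64_t limit = 512;                     // assumed archive_disk_threshold: 512 GB

    auto wrapped = sum - limit * G;                   // unsigned arithmetic wraps below zero
    int64_t signed_diff = (int64_t)sum - limit * G;   // signed arithmetic stays negative

    std::cout << "without cast: " << wrapped << "\n";      // huge positive number (~1.8e19)
    std::cout << "with cast:    " << signed_diff << "\n";  // negative, nothing to discard
    return 0;
}
```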


@ -11,14 +11,9 @@ namespace zilliz {
namespace milvus {
namespace engine {
Status ExecutionEngine::AddWithIds(const std::vector<float>& vectors, const std::vector<long>& vector_ids) {
long n1 = (long)vectors.size();
long n2 = (long)vector_ids.size();
if (n1 != n2) {
LOG(ERROR) << "vectors size is not equal to the size of vector_ids: " << n1 << "!=" << n2;
return Status::Error("Error: AddWithIds");
}
return AddWithIds(n1, vectors.data(), vector_ids.data());
Status ExecutionEngine::AddWithIdArray(const std::vector<float>& vectors, const std::vector<long>& vector_ids) {
long n = (long)vector_ids.size();
return AddWithIds(n, vectors.data(), vector_ids.data());
}
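The rename from `AddWithIds(vectors, ids)` to `AddWithIdArray` also drops the old `vectors.size() != vector_ids.size()` check and derives `n` from the id count instead, presumably because the `vectors` buffer is laid out flat as `n * dimension` floats (the dimension comes from the table schema), so its size equals the id count only when the dimension is 1. A small sketch of that size relationship, with an assumed dimension and vector count:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
    const std::size_t dim = 256;                // assumed vector dimension from the table schema
    const std::size_t n = 4;                    // assumed number of vectors in one insert

    std::vector<float> vectors(n * dim, 0.0f);  // flat layout: n * dim floats
    std::vector<long> vector_ids(n);            // exactly one id per vector

    // The old check compared these two sizes directly, which only holds for dim == 1;
    // deriving n from vector_ids.size() is what AddWithIdArray now does.
    assert(vectors.size() == n * dim);
    assert(vector_ids.size() == n);
    long derived_n = static_cast<long>(vector_ids.size());
    assert(derived_n == static_cast<long>(n));
    return 0;
}
```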


@ -23,8 +23,7 @@ enum class EngineType {
class ExecutionEngine {
public:
virtual Status AddWithIds(const std::vector<float>& vectors,
const std::vector<long>& vector_ids);
virtual Status AddWithIdArray(const std::vector<float>& vectors, const std::vector<long>& vector_ids);
virtual Status AddWithIds(long n, const float *xdata, const long *xids) = 0;


@ -3,16 +3,18 @@
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include "Factories.h"
#include "DBImpl.h"
#include <stdlib.h>
#include <time.h>
#include <sstream>
#include <iostream>
#include <vector>
#include <assert.h>
#include <easylogging++.h>
#include <regex>
#include "Exception.h"
namespace zilliz {
namespace milvus {
@ -26,6 +28,7 @@ DBMetaOptions DBMetaOptionsFactory::Build(const std::string& path) {
ss << "/tmp/" << rand();
p = ss.str();
}
DBMetaOptions meta;
meta.path = p;
return meta;
@ -43,6 +46,48 @@ std::shared_ptr<meta::DBMetaImpl> DBMetaImplFactory::Build() {
return std::shared_ptr<meta::DBMetaImpl>(new meta::DBMetaImpl(options));
}
std::shared_ptr<meta::Meta> DBMetaImplFactory::Build(const DBMetaOptions& metaOptions,
const int& mode) {
std::string uri = metaOptions.backend_uri;
std::string dialectRegex = "(.*)";
std::string usernameRegex = "(.*)";
std::string passwordRegex = "(.*)";
std::string hostRegex = "(.*)";
std::string portRegex = "(.*)";
std::string dbNameRegex = "(.*)";
std::string uriRegexStr = dialectRegex + "\\:\\/\\/" +
usernameRegex + "\\:" +
passwordRegex + "\\@" +
hostRegex + "\\:" +
portRegex + "\\/" +
dbNameRegex;
std::regex uriRegex(uriRegexStr);
std::smatch pieces_match;
if (std::regex_match(uri, pieces_match, uriRegex)) {
std::string dialect = pieces_match[1].str();
std::transform(dialect.begin(), dialect.end(), dialect.begin(), ::tolower);
if (dialect.find("mysql") != std::string::npos) {
ENGINE_LOG_INFO << "Using MySQL";
return std::make_shared<meta::MySQLMetaImpl>(meta::MySQLMetaImpl(metaOptions, mode));
}
else if (dialect.find("sqlite") != std::string::npos) {
ENGINE_LOG_INFO << "Using SQLite";
return std::make_shared<meta::DBMetaImpl>(meta::DBMetaImpl(metaOptions));
}
else {
ENGINE_LOG_ERROR << "Invalid dialect in URI: dialect = " << dialect;
throw InvalidArgumentException("URI dialect is not mysql / sqlite");
}
}
else {
ENGINE_LOG_ERROR << "Wrong URI format: URI = " << uri;
throw InvalidArgumentException("Wrong URI format ");
}
}
std::shared_ptr<DB> DBFactory::Build() {
auto options = OptionsFactory::Build();
auto db = DBFactory::Build(options);
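The factory above chooses between the SQLite and MySQL meta backends purely by splitting `db_backend_url` with that regex, which expects the `dialect://username:password@host:port/database` shape described in server_config.template. Below is a self-contained sketch of the same split, using one hypothetical MySQL URI and the empty-field SQLite default from the config (the variable names and credentials are illustrative, not the ones in Factories.cpp):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
    // Same shape as the factory's pattern: dialect://username:password@host:port/database
    const std::regex uri_regex(R"((.*)://(.*):(.*)@(.*):(.*)/(.*))");

    // "sqlite://:@:/" keeps every delimiter but leaves all fields except the dialect empty.
    for (const std::string uri : {"mysql://root:123456@127.0.0.1:3306/milvus", "sqlite://:@:/"}) {
        std::smatch pieces;
        if (std::regex_match(uri, pieces, uri_regex)) {
            std::cout << "dialect=" << pieces[1] << " user=" << pieces[2]
                      << " host=" << pieces[4] << " port=" << pieces[5]
                      << " db=" << pieces[6] << "\n";
        } else {
            std::cout << "malformed URI: " << uri << "\n";
        }
    }
    return 0;
}
```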


@ -7,6 +7,7 @@
#include "DB.h"
#include "DBMetaImpl.h"
#include "MySQLMetaImpl.h"
#include "Options.h"
#include "ExecutionEngine.h"
@ -27,6 +28,7 @@ struct OptionsFactory {
struct DBMetaImplFactory {
static std::shared_ptr<meta::DBMetaImpl> Build();
static std::shared_ptr<meta::Meta> Build(const DBMetaOptions& metaOptions, const int& mode);
};
struct DBFactory {


@ -20,36 +20,54 @@ namespace engine {
MemVectors::MemVectors(const std::shared_ptr<meta::Meta>& meta_ptr,
const meta::TableFileSchema& schema, const Options& options)
: pMeta_(meta_ptr),
: meta_(meta_ptr),
options_(options),
schema_(schema),
pIdGenerator_(new SimpleIDGenerator()),
pEE_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) {
id_generator_(new SimpleIDGenerator()),
active_engine_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) {
}
void MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) {
Status MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) {
if(active_engine_ == nullptr) {
return Status::Error("index engine is null");
}
auto start_time = METRICS_NOW_TIME;
pIdGenerator_->GetNextIDNumbers(n_, vector_ids_);
pEE_->AddWithIds(n_, vectors_, vector_ids_.data());
id_generator_->GetNextIDNumbers(n_, vector_ids_);
Status status = active_engine_->AddWithIds(n_, vectors_, vector_ids_.data());
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast<int>(n_), static_cast<int>(schema_.dimension_), total_time);
return status;
}
size_t MemVectors::Total() const {
return pEE_->Count();
size_t MemVectors::RowCount() const {
if(active_engine_ == nullptr) {
return 0;
}
return active_engine_->Count();
}
size_t MemVectors::ApproximateSize() const {
return pEE_->Size();
size_t MemVectors::Size() const {
if(active_engine_ == nullptr) {
return 0;
}
return active_engine_->Size();
}
Status MemVectors::Serialize(std::string& table_id) {
if(active_engine_ == nullptr) {
return Status::Error("index engine is null");
}
table_id = schema_.table_id_;
auto size = ApproximateSize();
auto size = Size();
auto start_time = METRICS_NOW_TIME;
pEE_->Serialize();
active_engine_->Serialize();
auto end_time = METRICS_NOW_TIME;
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
schema_.size_ = size;
@ -59,20 +77,20 @@ Status MemVectors::Serialize(std::string& table_id) {
schema_.file_type_ = (size >= options_.index_trigger_size) ?
meta::TableFileSchema::TO_INDEX : meta::TableFileSchema::RAW;
auto status = pMeta_->UpdateTableFile(schema_);
auto status = meta_->UpdateTableFile(schema_);
LOG(DEBUG) << "New " << ((schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
<< " file " << schema_.file_id_ << " of size " << (double)(pEE_->Size()) / (double)meta::M << " M";
<< " file " << schema_.file_id_ << " of size " << (double)(active_engine_->Size()) / (double)meta::M << " M";
pEE_->Cache();
active_engine_->Cache();
return status;
}
MemVectors::~MemVectors() {
if (pIdGenerator_ != nullptr) {
delete pIdGenerator_;
pIdGenerator_ = nullptr;
if (id_generator_ != nullptr) {
delete id_generator_;
id_generator_ = nullptr;
}
}
@ -81,20 +99,20 @@ MemVectors::~MemVectors() {
*/
MemManager::MemVectorsPtr MemManager::GetMemByTable(
const std::string& table_id) {
auto memIt = memMap_.find(table_id);
if (memIt != memMap_.end()) {
auto memIt = mem_id_map_.find(table_id);
if (memIt != mem_id_map_.end()) {
return memIt->second;
}
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
auto status = pMeta_->CreateTableFile(table_file);
auto status = meta_->CreateTableFile(table_file);
if (!status.ok()) {
return nullptr;
}
memMap_[table_id] = MemVectorsPtr(new MemVectors(pMeta_, table_file, options_));
return memMap_[table_id];
mem_id_map_[table_id] = MemVectorsPtr(new MemVectors(meta_, table_file, options_));
return mem_id_map_[table_id];
}
Status MemManager::InsertVectors(const std::string& table_id_,
@ -114,37 +132,62 @@ Status MemManager::InsertVectorsNoLock(const std::string& table_id,
if (mem == nullptr) {
return Status::NotFound("Group " + table_id + " not found!");
}
mem->Add(n, vectors, vector_ids);
return Status::OK();
//make sure each file size stays below index_trigger_size
if(mem->Size() > options_.index_trigger_size) {
std::unique_lock<std::mutex> lock(serialization_mtx_);
immu_mem_list_.push_back(mem);
mem_id_map_.erase(table_id);
return InsertVectorsNoLock(table_id, n, vectors, vector_ids);
} else {
return mem->Add(n, vectors, vector_ids);
}
}
Status MemManager::ToImmutable() {
std::unique_lock<std::mutex> lock(mutex_);
for (auto& kv: memMap_) {
immMems_.push_back(kv.second);
MemIdMap temp_map;
for (auto& kv: mem_id_map_) {
if(kv.second->RowCount() == 0) {
temp_map.insert(kv);
continue;//empty vector, no need to serialize
}
immu_mem_list_.push_back(kv.second);
}
memMap_.clear();
mem_id_map_.swap(temp_map);
return Status::OK();
}
Status MemManager::Serialize(std::vector<std::string>& table_ids) {
Status MemManager::Serialize(std::set<std::string>& table_ids) {
ToImmutable();
std::unique_lock<std::mutex> lock(serialization_mtx_);
std::string table_id;
table_ids.clear();
for (auto& mem : immMems_) {
for (auto& mem : immu_mem_list_) {
mem->Serialize(table_id);
table_ids.push_back(table_id);
table_ids.insert(table_id);
}
immMems_.clear();
immu_mem_list_.clear();
return Status::OK();
}
Status MemManager::EraseMemVector(const std::string& table_id) {
std::unique_lock<std::mutex> lock(mutex_);
memMap_.erase(table_id);
{//erase MemVector from rapid-insert cache
std::unique_lock<std::mutex> lock(mutex_);
mem_id_map_.erase(table_id);
}
{//erase MemVector from serialize cache
std::unique_lock<std::mutex> lock(serialization_mtx_);
MemList temp_list;
for (auto& mem : immu_mem_list_) {
if(mem->TableId() != table_id) {
temp_list.push_back(mem);
}
}
immu_mem_list_.swap(temp_list);
}
return Status::OK();
}
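The insert path above keeps each per-table buffer below options_.index_trigger_size: once a buffer grows past the limit it is moved to immu_mem_list_ and the insert retries on a fresh buffer, which a later Serialize() call flushes. A standalone sketch of that seal-and-retry pattern, using plain types and a made-up trigger size rather than the project's classes:
#include <cstddef>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Buffer { std::size_t size = 0; };
using BufferPtr = std::shared_ptr<Buffer>;

std::map<std::string, BufferPtr> active;   // plays the role of mem_id_map_
std::vector<BufferPtr> sealed;             // plays the role of immu_mem_list_
constexpr std::size_t kTriggerSize = 8;    // stand-in for options_.index_trigger_size

void Insert(const std::string& table, std::size_t n) {
    auto it = active.find(table);
    if (it == active.end()) {
        it = active.emplace(table, std::make_shared<Buffer>()).first;
    }
    if (it->second->size > kTriggerSize) {  // buffer is full: seal it and retry on a new one
        sealed.push_back(it->second);
        active.erase(it);
        Insert(table, n);
        return;
    }
    it->second->size += n;
}

int main() {
    for (int i = 0; i < 5; ++i) Insert("table_1", 3);
    std::cout << "active bytes: " << active["table_1"]->size
              << ", sealed buffers: " << sealed.size() << std::endl;  // prints: active bytes: 6, sealed buffers: 1
    return 0;
}
Serialize() then drains the sealed list under serialization_mtx_, which is why EraseMemVector above has to purge both containers.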


@ -15,6 +15,7 @@
#include <ctime>
#include <memory>
#include <mutex>
#include <set>
namespace zilliz {
namespace milvus {
@ -32,11 +33,11 @@ public:
explicit MemVectors(const std::shared_ptr<meta::Meta>&,
const meta::TableFileSchema&, const Options&);
void Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_);
Status Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_);
size_t Total() const;
size_t RowCount() const;
size_t ApproximateSize() const;
size_t Size() const;
Status Serialize(std::string& table_id);
@ -44,16 +45,18 @@ public:
const std::string& Location() const { return schema_.location_; }
std::string TableId() const { return schema_.table_id_; }
private:
MemVectors() = delete;
MemVectors(const MemVectors&) = delete;
MemVectors& operator=(const MemVectors&) = delete;
MetaPtr pMeta_;
MetaPtr meta_;
Options options_;
meta::TableFileSchema schema_;
IDGenerator* pIdGenerator_;
ExecutionEnginePtr pEE_;
IDGenerator* id_generator_;
ExecutionEnginePtr active_engine_;
}; // MemVectors
@ -66,14 +69,14 @@ public:
using Ptr = std::shared_ptr<MemManager>;
MemManager(const std::shared_ptr<meta::Meta>& meta, const Options& options)
: pMeta_(meta), options_(options) {}
: meta_(meta), options_(options) {}
MemVectorsPtr GetMemByTable(const std::string& table_id);
Status InsertVectors(const std::string& table_id,
size_t n, const float* vectors, IDNumbers& vector_ids);
Status Serialize(std::vector<std::string>& table_ids);
Status Serialize(std::set<std::string>& table_ids);
Status EraseMemVector(const std::string& table_id);
@ -82,11 +85,11 @@ private:
size_t n, const float* vectors, IDNumbers& vector_ids);
Status ToImmutable();
using MemMap = std::map<std::string, MemVectorsPtr>;
using ImmMemPool = std::vector<MemVectorsPtr>;
MemMap memMap_;
ImmMemPool immMems_;
MetaPtr pMeta_;
using MemIdMap = std::map<std::string, MemVectorsPtr>;
using MemList = std::vector<MemVectorsPtr>;
MemIdMap mem_id_map_;
MemList immu_mem_list_;
MetaPtr meta_;
Options options_;
std::mutex mutex_;
std::mutex serialization_mtx_;


@ -0,0 +1,113 @@
#include "mysql++/mysql++.h"
#include <string>
#include <unistd.h>
#include <atomic>
#include "Log.h"
class MySQLConnectionPool : public mysqlpp::ConnectionPool {
public:
// The object's only constructor
MySQLConnectionPool(std::string dbName,
std::string userName,
std::string passWord,
std::string serverIp,
int port = 0,
int maxPoolSize = 8) :
db_(dbName),
user_(userName),
password_(passWord),
server_(serverIp),
port_(port),
max_pool_size_(maxPoolSize)
{
conns_in_use_ = 0;
max_idle_time_ = 10; //10 seconds
}
// The destructor. We _must_ call ConnectionPool::clear() here,
// because our superclass can't do it for us.
~MySQLConnectionPool() override {
clear();
}
// Do a simple form of in-use connection limiting: wait to return
// a connection until there are a reasonably low number in use
// already. Can't do this in create() because we're interested in
// connections actually in use, not those created. Also note that
// we keep our own count; ConnectionPool::size() isn't the same!
mysqlpp::Connection* grab() override {
while (conns_in_use_ > max_pool_size_) {
sleep(1);
}
++conns_in_use_;
return mysqlpp::ConnectionPool::grab();
}
// Other half of in-use conn count limit
void release(const mysqlpp::Connection* pc) override {
mysqlpp::ConnectionPool::release(pc);
if (conns_in_use_ <= 0) {
ENGINE_LOG_WARNING << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = " << conns_in_use_ << std::endl;
}
else {
--conns_in_use_;
}
}
int getConnectionsInUse() {
return conns_in_use_;
}
void set_max_idle_time(int max_idle) {
max_idle_time_ = max_idle;
}
std::string getDB() {
return db_;
}
protected:
// Superclass overrides
mysqlpp::Connection* create() override {
// Create connection using the parameters we were passed upon
// creation.
mysqlpp::Connection* conn = new mysqlpp::Connection();
conn->set_option(new mysqlpp::ReconnectOption(true));
conn->connect(db_.empty() ? 0 : db_.c_str(),
server_.empty() ? 0 : server_.c_str(),
user_.empty() ? 0 : user_.c_str(),
password_.empty() ? 0 : password_.c_str(),
port_);
return conn;
}
void destroy(mysqlpp::Connection* cp) override {
// Our superclass can't know how we created the Connection, so
// it delegates destruction to us, to be safe.
delete cp;
}
unsigned int max_idle_time() override {
return max_idle_time_;
}
private:
// Number of connections currently in use
std::atomic<int> conns_in_use_;
// Our connection parameters
std::string db_, user_, password_, server_;
int port_;
int max_pool_size_;
unsigned int max_idle_time_;
};
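A hedged usage sketch for the pool above: grab a connection, run a trivial query, and release it so other threads can reuse it. The credentials and host are placeholders; in the server they come from the meta backend_uri.
#include "MySQLConnectionPool.h"   // header shown above; include path assumed
#include <iostream>

int main() {
    MySQLConnectionPool pool("milvus", "root", "secret", "127.0.0.1", 3306, 8);  // placeholder parameters

    mysqlpp::Connection* conn = pool.grab();     // blocks while all 8 connections are in use
    try {
        mysqlpp::Query query = conn->query("SELECT 1");
        mysqlpp::StoreQueryResult res = query.store();
        std::cout << "rows: " << res.num_rows()
                  << ", connections in use: " << pool.getConnectionsInUse() << std::endl;
    } catch (const mysqlpp::Exception& e) {
        std::cerr << "query failed: " << e.what() << std::endl;
    }
    pool.release(conn);                          // always hand the connection back
    return 0;
}
mysql++ also ships a ScopedConnection helper that pairs grab() and release() through RAII; the sketch sticks to the two methods defined above.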

cpp/src/db/MySQLMetaImpl.cpp (new file, 1821 lines): diff suppressed because it is too large


@ -0,0 +1,91 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include "Meta.h"
#include "Options.h"
#include "MySQLConnectionPool.h"
#include "mysql++/mysql++.h"
#include <mutex>
namespace zilliz {
namespace milvus {
namespace engine {
namespace meta {
// auto StoragePrototype(const std::string& path);
using namespace mysqlpp;
class MySQLMetaImpl : public Meta {
public:
MySQLMetaImpl(const DBMetaOptions& options_, const int& mode);
virtual Status CreateTable(TableSchema& table_schema) override;
virtual Status DescribeTable(TableSchema& group_info_) override;
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;
virtual Status DeleteTable(const std::string& table_id) override;
virtual Status DeleteTableFiles(const std::string& table_id) override;
virtual Status CreateTableFile(TableFileSchema& file_schema) override;
virtual Status DropPartitionsByDates(const std::string& table_id,
const DatesT& dates) override;
virtual Status GetTableFiles(const std::string& table_id,
const std::vector<size_t>& ids,
TableFilesSchema& table_files) override;
virtual Status UpdateTableFile(TableFileSchema& file_schema) override;
virtual Status UpdateTableFiles(TableFilesSchema& files) override;
virtual Status FilesToSearch(const std::string& table_id,
const DatesT& partition,
DatePartionedTableFilesSchema& files) override;
virtual Status FilesToMerge(const std::string& table_id,
DatePartionedTableFilesSchema& files) override;
virtual Status FilesToIndex(TableFilesSchema&) override;
virtual Status Archive() override;
virtual Status Size(uint64_t& result) override;
virtual Status CleanUp() override;
virtual Status CleanUpFilesWithTTL(uint16_t seconds) override;
virtual Status DropAll() override;
virtual Status Count(const std::string& table_id, uint64_t& result) override;
virtual ~MySQLMetaImpl();
private:
Status NextFileId(std::string& file_id);
Status NextTableId(std::string& table_id);
Status DiscardFiles(long long to_discard_size);
std::string GetTablePath(const std::string& table_id);
std::string GetTableDatePartitionPath(const std::string& table_id, DateT& date);
void GetTableFilePath(TableFileSchema& group_file);
Status Initialize();
const DBMetaOptions options_;
const int mode_;
std::shared_ptr<MySQLConnectionPool> mysql_connection_pool_;
bool safe_grab = false;
// std::mutex connectionMutex_;
}; // MySQLMetaImpl
} // namespace meta
} // namespace engine
} // namespace milvus
} // namespace zilliz
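Putting the factory and this interface together, a hedged sketch of selecting the MySQL backend and probing for a table. The include paths, URI, credentials and a reachable MySQL server are all assumptions; the point is only the call sequence.
#include "db/Factories.h"   // include paths assumed; the headers are the ones shown in this diff
#include "db/Options.h"
#include <iostream>
#include <memory>

using namespace zilliz::milvus::engine;

int main() {
    DBMetaOptions meta_options;
    meta_options.path = "/tmp/milvus";                                      // local work path (placeholder)
    meta_options.backend_uri = "mysql://root:secret@127.0.0.1:3306/milvus"; // placeholder URI and credentials

    // The factory picks MySQLMetaImpl or DBMetaImpl from the URI dialect (see Factories.cpp above).
    std::shared_ptr<meta::Meta> meta = DBMetaImplFactory::Build(meta_options, Options::MODE::SINGLE);

    bool has_table = false;
    meta->HasTable("example_table", has_table);   // HasTable is declared in the interface above
    std::cout << "example_table exists: " << std::boolalpha << has_table << std::endl;
    return 0;
}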


@ -24,6 +24,12 @@ ArchiveConf::ArchiveConf(const std::string& type, const std::string& criterias)
ParseCritirias(criterias);
}
void ArchiveConf::SetCriterias(const ArchiveConf::CriteriaT& criterial) {
for(auto& pair : criterial) {
criterias_[pair.first] = pair.second;
}
}
void ArchiveConf::ParseCritirias(const std::string& criterias) {
std::stringstream ss(criterias);
std::vector<std::string> tokens;


@ -19,14 +19,20 @@ static constexpr uint64_t ONE_KB = 1024;
static constexpr uint64_t ONE_MB = ONE_KB*ONE_KB;
static constexpr uint64_t ONE_GB = ONE_KB*ONE_MB;
static const std::string ARCHIVE_CONF_DISK = "disk";
static const std::string ARCHIVE_CONF_DAYS = "days";
static const std::string ARCHIVE_CONF_DEFAULT = ARCHIVE_CONF_DISK + ":512";
struct ArchiveConf {
using CriteriaT = std::map<std::string, int>;
ArchiveConf(const std::string& type, const std::string& criterias = "disk:512");
ArchiveConf(const std::string& type, const std::string& criterias = ARCHIVE_CONF_DEFAULT);
const std::string& GetType() const { return type_; }
const CriteriaT GetCriterias() const { return criterias_; }
void SetCriterias(const ArchiveConf::CriteriaT& criterial);
private:
void ParseCritirias(const std::string& criterias);
void ParseType(const std::string& type);
@ -41,13 +47,20 @@ struct DBMetaOptions {
ArchiveConf archive_conf = ArchiveConf("delete");
}; // DBMetaOptions
struct Options {
typedef enum {
SINGLE,
CLUSTER,
READ_ONLY
} MODE;
Options();
uint16_t memory_sync_interval = 1; //unit: second
uint16_t merge_trigger_number = 2;
size_t index_trigger_size = ONE_GB; //unit: byte
DBMetaOptions meta;
int mode = MODE::SINGLE;
}; // Options
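For illustration, the archive settings declared above can also be adjusted in code. A minimal sketch, assuming the include path and treating the numeric values as placeholders in whatever units the "disk:512" default uses:
#include "db/Options.h"   // header shown above; include path assumed

using namespace zilliz::milvus::engine;

int main() {
    ArchiveConf archive("delete");                 // starts from ARCHIVE_CONF_DEFAULT ("disk:512")
    ArchiveConf::CriteriaT extra;
    extra[ARCHIVE_CONF_DISK] = 1024;               // placeholder values
    extra[ARCHIVE_CONF_DAYS] = 30;
    archive.SetCriterias(extra);                   // merges the new values into the criteria map

    Options options;                               // SINGLE mode and the defaults listed above
    options.meta.archive_conf = archive;
    return 0;
}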


@ -15,33 +15,46 @@ namespace engine {
class Status {
public:
Status() noexcept : state_(nullptr) {}
~Status() { delete[] state_; }
Status(const Status &rhs);
Status &operator=(const Status &rhs);
Status &
operator=(const Status &rhs);
Status(Status &&rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; }
Status &operator=(Status &&rhs_) noexcept;
static Status OK() { return Status(); }
static Status NotFound(const std::string &msg, const std::string &msg2 = "") {
Status &
operator=(Status &&rhs_) noexcept;
static Status
OK() { return Status(); }
static Status
NotFound(const std::string &msg, const std::string &msg2 = "") {
return Status(kNotFound, msg, msg2);
}
static Status Error(const std::string &msg, const std::string &msg2 = "") {
static Status
Error(const std::string &msg, const std::string &msg2 = "") {
return Status(kError, msg, msg2);
}
static Status InvalidDBPath(const std::string &msg, const std::string &msg2 = "") {
static Status
InvalidDBPath(const std::string &msg, const std::string &msg2 = "") {
return Status(kInvalidDBPath, msg, msg2);
}
static Status GroupError(const std::string &msg, const std::string &msg2 = "") {
static Status
GroupError(const std::string &msg, const std::string &msg2 = "") {
return Status(kGroupError, msg, msg2);
}
static Status DBTransactionError(const std::string &msg, const std::string &msg2 = "") {
static Status
DBTransactionError(const std::string &msg, const std::string &msg2 = "") {
return Status(kDBTransactionError, msg, msg2);
}
static Status AlreadyExist(const std::string &msg, const std::string &msg2 = "") {
static Status
AlreadyExist(const std::string &msg, const std::string &msg2 = "") {
return Status(kAlreadyExist, msg, msg2);
}
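A brief sketch of how callers are expected to use these factories; the helper function and table check are illustrative only, and ok() is the accessor already used elsewhere in this diff.
#include "db/Status.h"   // the Status header excerpted above; file name and path assumed
#include <iostream>
#include <string>

using namespace zilliz::milvus::engine;

Status LoadTable(const std::string& table_id) {
    if (table_id.empty()) {
        return Status::Error("table id is empty");   // any factory above builds a non-OK status
    }
    return Status::OK();
}

int main() {
    Status status = LoadTable("");
    std::cout << (status.ok() ? "ok" : "load failed") << std::endl;
    return 0;
}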


@ -24,14 +24,6 @@ TaskDispatchQueue::Put(const ScheduleContextPtr &context) {
return;
}
if (queue_.size() >= capacity_) {
std::string error_msg =
"blocking queue is full, capacity: " + std::to_string(capacity_) + " queue_size: " +
std::to_string(queue_.size());
SERVER_LOG_ERROR << error_msg;
throw server::ServerException(server::SERVER_BLOCKING_QUEUE_EMPTY, error_msg);
}
TaskDispatchStrategy::Schedule(context, queue_);
empty_.notify_all();
@ -42,12 +34,6 @@ TaskDispatchQueue::Take() {
std::unique_lock <std::mutex> lock(mtx);
empty_.wait(lock, [this] { return !queue_.empty(); });
if (queue_.empty()) {
std::string error_msg = "blocking queue empty";
SERVER_LOG_ERROR << error_msg;
throw server::ServerException(server::SERVER_BLOCKING_QUEUE_EMPTY, error_msg);
}
ScheduleTaskPtr front(queue_.front());
queue_.pop_front();
full_.notify_all();


@ -74,20 +74,26 @@ public:
}
std::string table_id = context->table_id();
for(auto iter = task_list.begin(); iter != task_list.end(); ++iter) {
//put the delete task at the proper position
//for example: task_list holds 10 IndexLoadTasks and only the No.5 IndexLoadTask is for table1;
//if the user wants to delete table1, the DeleteTask will be inserted at the No.6 position
for(std::list<ScheduleTaskPtr>::reverse_iterator iter = task_list.rbegin(); iter != task_list.rend(); ++iter) {
if((*iter)->type() != ScheduleTaskType::kIndexLoad) {
continue;
}
//put delete task to proper position
IndexLoadTaskPtr loader = std::static_pointer_cast<IndexLoadTask>(*iter);
if(loader->file_->table_id_ == table_id) {
task_list.insert(++iter, delete_task);
break;
if(loader->file_->table_id_ != table_id) {
continue;
}
task_list.insert(iter.base(), delete_task);
return true;
}
//no task is searching this table; put the DeleteTask at the front of the list so that the table will be deleted asap
task_list.push_front(delete_task);
return true;
}
};
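The reverse scan above relies on a subtlety of std::list reverse iterators: inserting at iter.base() places the new element directly after the element *iter refers to when the list is read front to back. A standalone sketch with plain ints as stand-ins:
#include <iostream>
#include <list>

int main() {
    std::list<int> tasks = {1, 2, 3, 2, 4};        // pretend the 2s are IndexLoadTasks for the target table
    for (auto it = tasks.rbegin(); it != tasks.rend(); ++it) {
        if (*it == 2) {                            // last matching load task, found from the back
            tasks.insert(it.base(), 99);           // 99 plays the role of the DeleteTask
            break;
        }
    }
    for (int t : tasks) std::cout << t << ' ';     // prints: 1 2 3 2 99 4
    std::cout << std::endl;
    return 0;
}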


@ -1,69 +1,69 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include <boost/serialization/access.hpp>
#include <string>
#include <map>
class GPUInfoFile {
public:
GPUInfoFile() = default;
GPUInfoFile(const int &device_count, const std::map<int, std::string> &uuid_encryption_map)
: device_count_(device_count), uuid_encryption_map_(uuid_encryption_map) {}
int get_device_count() {
return device_count_;
}
std::map<int, std::string> &get_uuid_encryption_map() {
return uuid_encryption_map_;
}
public:
friend class boost::serialization::access;
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & device_count_;
ar & uuid_encryption_map_;
}
public:
int device_count_ = 0;
std::map<int, std::string> uuid_encryption_map_;
};
class SerializedGPUInfoFile {
public:
~SerializedGPUInfoFile() {
if (gpu_info_file_ != nullptr) {
delete (gpu_info_file_);
gpu_info_file_ = nullptr;
}
}
void
set_gpu_info_file(GPUInfoFile *gpu_info_file) {
gpu_info_file_ = gpu_info_file;
}
GPUInfoFile *get_gpu_info_file() {
return gpu_info_file_;
}
private:
friend class boost::serialization::access;
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & gpu_info_file_;
}
private:
GPUInfoFile *gpu_info_file_ = nullptr;
};
///*******************************************************************************
// * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// * Unauthorized copying of this file, via any medium is strictly prohibited.
// * Proprietary and confidential.
// ******************************************************************************/
//#pragma once
//
//#include <boost/serialization/access.hpp>
//#include <string>
//#include <map>
//
//
//class GPUInfoFile {
// public:
// GPUInfoFile() = default;
//
// GPUInfoFile(const int &device_count, const std::map<int, std::string> &uuid_encryption_map)
// : device_count_(device_count), uuid_encryption_map_(uuid_encryption_map) {}
//
// int get_device_count() {
// return device_count_;
// }
// std::map<int, std::string> &get_uuid_encryption_map() {
// return uuid_encryption_map_;
// }
//
//
// public:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & device_count_;
// ar & uuid_encryption_map_;
// }
//
// public:
// int device_count_ = 0;
// std::map<int, std::string> uuid_encryption_map_;
//};
//
//class SerializedGPUInfoFile {
// public:
// ~SerializedGPUInfoFile() {
// if (gpu_info_file_ != nullptr) {
// delete (gpu_info_file_);
// gpu_info_file_ = nullptr;
// }
// }
//
// void
// set_gpu_info_file(GPUInfoFile *gpu_info_file) {
// gpu_info_file_ = gpu_info_file;
// }
//
// GPUInfoFile *get_gpu_info_file() {
// return gpu_info_file_;
// }
// private:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & gpu_info_file_;
// }
//
// private:
// GPUInfoFile *gpu_info_file_ = nullptr;
//};


@ -1,83 +1,83 @@
#include "utils/Log.h"
#include "LicenseLibrary.h"
#include "utils/Error.h"
#include <iostream>
#include <getopt.h>
#include <memory.h>
// If no path is provided, the current working directory and the default file name system.info are used.
using namespace zilliz::milvus;
void
print_usage(const std::string &app_name) {
printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
printf(" Options:\n");
printf(" -h --help Print this help\n");
printf(" -s --sysinfo filename Generate system info file as given name\n");
printf("\n");
}
int main(int argc, char *argv[]) {
std::string app_name = argv[0];
if (argc != 1 && argc != 3) {
print_usage(app_name);
return EXIT_FAILURE;
}
static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
{"help", no_argument, 0, 'h'},
{NULL, 0, 0, 0}};
int value = 0;
int option_index = 0;
std::string system_info_filename = "./system.info";
while ((value = getopt_long(argc, argv, "s:h", long_options, &option_index)) != -1) {
switch (value) {
case 's': {
char *system_info_filename_ptr = strdup(optarg);
system_info_filename = system_info_filename_ptr;
free(system_info_filename_ptr);
// printf("Generate system info file: %s\n", system_info_filename.c_str());
break;
}
case 'h':print_usage(app_name);
return EXIT_SUCCESS;
case '?':print_usage(app_name);
return EXIT_FAILURE;
default:print_usage(app_name);
break;
}
}
int device_count = 0;
server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
if (err != server::SERVER_SUCCESS) return -1;
// 1. Get All GPU UUID
std::vector<std::string> uuid_array;
err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
if (err != server::SERVER_SUCCESS) return -1;
// 2. Get UUID SHA256
std::vector<std::string> uuid_sha256_array;
err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
if (err != server::SERVER_SUCCESS) return -1;
// 3. Generate GPU ID map with GPU UUID
std::map<int, std::string> uuid_encrption_map;
for (int i = 0; i < device_count; ++i) {
uuid_encrption_map[i] = uuid_sha256_array[i];
}
// 4. Generate GPU_info File
err = server::LicenseLibrary::GPUinfoFileSerialization(system_info_filename,
device_count,
uuid_encrption_map);
if (err != server::SERVER_SUCCESS) return -1;
printf("Generate GPU_info File Success\n");
return 0;
}
//
//#include "utils/Log.h"
//#include "LicenseLibrary.h"
//#include "utils/Error.h"
//
//#include <iostream>
//#include <getopt.h>
//#include <memory.h>
//// Not provide path: current work path will be used and system.info.
//using namespace zilliz::milvus;
//
//void
//print_usage(const std::string &app_name) {
// printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
// printf(" Options:\n");
// printf(" -h --help Print this help\n");
// printf(" -s --sysinfo filename Generate system info file as given name\n");
// printf("\n");
//}
//
//int main(int argc, char *argv[]) {
// std::string app_name = argv[0];
// if (argc != 1 && argc != 3) {
// print_usage(app_name);
// return EXIT_FAILURE;
// }
//
// static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
// {"help", no_argument, 0, 'h'},
// {NULL, 0, 0, 0}};
// int value = 0;
// int option_index = 0;
// std::string system_info_filename = "./system.info";
// while ((value = getopt_long(argc, argv, "s:h", long_options, &option_index)) != -1) {
// switch (value) {
// case 's': {
// char *system_info_filename_ptr = strdup(optarg);
// system_info_filename = system_info_filename_ptr;
// free(system_info_filename_ptr);
//// printf("Generate system info file: %s\n", system_info_filename.c_str());
// break;
// }
// case 'h':print_usage(app_name);
// return EXIT_SUCCESS;
// case '?':print_usage(app_name);
// return EXIT_FAILURE;
// default:print_usage(app_name);
// break;
// }
// }
//
// int device_count = 0;
// server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
// if (err != server::SERVER_SUCCESS) return -1;
//
// // 1. Get All GPU UUID
// std::vector<std::string> uuid_array;
// err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
// if (err != server::SERVER_SUCCESS) return -1;
//
// // 2. Get UUID SHA256
// std::vector<std::string> uuid_sha256_array;
// err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
// if (err != server::SERVER_SUCCESS) return -1;
//
// // 3. Generate GPU ID map with GPU UUID
// std::map<int, std::string> uuid_encrption_map;
// for (int i = 0; i < device_count; ++i) {
// uuid_encrption_map[i] = uuid_sha256_array[i];
// }
//
//
// // 4. Generate GPU_info File
// err = server::LicenseLibrary::GPUinfoFileSerialization(system_info_filename,
// device_count,
// uuid_encrption_map);
// if (err != server::SERVER_SUCCESS) return -1;
//
// printf("Generate GPU_info File Success\n");
//
//
// return 0;
//}


@ -1,131 +1,131 @@
#include "LicenseCheck.h"
#include <iostream>
#include <thread>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
//#include <boost/foreach.hpp>
//#include <boost/serialization/vector.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/serialization/map.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/thread.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
namespace zilliz {
namespace milvus {
namespace server {
LicenseCheck::LicenseCheck() {
}
LicenseCheck::~LicenseCheck() {
StopCountingDown();
}
ServerError
LicenseCheck::LegalityCheck(const std::string &license_file_path) {
int device_count;
LicenseLibrary::GetDeviceCount(device_count);
std::vector<std::string> uuid_array;
LicenseLibrary::GetUUID(device_count, uuid_array);
std::vector<std::string> sha_array;
LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, sha_array);
int output_device_count;
std::map<int, std::string> uuid_encryption_map;
time_t starting_time;
time_t end_time;
ServerError err = LicenseLibrary::LicenseFileDeserialization(license_file_path,
output_device_count,
uuid_encryption_map,
starting_time,
end_time);
if(err !=SERVER_SUCCESS)
{
std::cout << "License check error: 01" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
time_t system_time;
LicenseLibrary::GetSystemTime(system_time);
if (device_count != output_device_count) {
std::cout << "License check error: 02" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
for (int i = 0; i < device_count; ++i) {
if (sha_array[i] != uuid_encryption_map[i]) {
std::cout << "License check error: 03" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
}
if (system_time < starting_time || system_time > end_time) {
std::cout << "License check error: 04" << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
std::cout << "Legality Check Success" << std::endl;
return SERVER_SUCCESS;
}
// Part 2: Timing check license
ServerError
LicenseCheck::AlterFile(const std::string &license_file_path,
const boost::system::error_code &ec,
boost::asio::deadline_timer *pt) {
ServerError err = LicenseCheck::LegalityCheck(license_file_path);
if(err!=SERVER_SUCCESS) {
std::cout << "license file check error" << std::endl;
exit(1);
}
std::cout << "---runing---" << std::endl;
pt->expires_at(pt->expires_at() + boost::posix_time::hours(1));
pt->async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, pt));
return SERVER_SUCCESS;
}
ServerError
LicenseCheck::StartCountingDown(const std::string &license_file_path) {
if (!LicenseLibrary::IsFileExistent(license_file_path)) {
std::cout << "license file not exist" << std::endl;
exit(1);
}
//create a thread to run AlterFile
if(counting_thread_ == nullptr) {
counting_thread_ = std::make_shared<std::thread>([&]() {
boost::asio::deadline_timer t(io_service_, boost::posix_time::hours(1));
t.async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, &t));
io_service_.run();//this thread will block here
});
}
return SERVER_SUCCESS;
}
ServerError
LicenseCheck::StopCountingDown() {
if(!io_service_.stopped()) {
io_service_.stop();
}
if(counting_thread_ != nullptr) {
counting_thread_->join();
counting_thread_ = nullptr;
}
return SERVER_SUCCESS;
}
}
}
}
//#include "LicenseCheck.h"
//#include <iostream>
//#include <thread>
//
//#include <boost/archive/binary_oarchive.hpp>
//#include <boost/archive/binary_iarchive.hpp>
////#include <boost/foreach.hpp>
////#include <boost/serialization/vector.hpp>
//#include <boost/filesystem/path.hpp>
//#include <boost/serialization/map.hpp>
//#include <boost/filesystem/operations.hpp>
//#include <boost/thread.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>
//
//
//namespace zilliz {
//namespace milvus {
//namespace server {
//
//LicenseCheck::LicenseCheck() {
//
//}
//
//LicenseCheck::~LicenseCheck() {
// StopCountingDown();
//}
//
//ServerError
//LicenseCheck::LegalityCheck(const std::string &license_file_path) {
//
// int device_count;
// LicenseLibrary::GetDeviceCount(device_count);
// std::vector<std::string> uuid_array;
// LicenseLibrary::GetUUID(device_count, uuid_array);
//
// std::vector<std::string> sha_array;
// LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, sha_array);
//
// int output_device_count;
// std::map<int, std::string> uuid_encryption_map;
// time_t starting_time;
// time_t end_time;
// ServerError err = LicenseLibrary::LicenseFileDeserialization(license_file_path,
// output_device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// if(err !=SERVER_SUCCESS)
// {
// std::cout << "License check error: 01" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// time_t system_time;
// LicenseLibrary::GetSystemTime(system_time);
//
// if (device_count != output_device_count) {
// std::cout << "License check error: 02" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// for (int i = 0; i < device_count; ++i) {
// if (sha_array[i] != uuid_encryption_map[i]) {
// std::cout << "License check error: 03" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// }
// if (system_time < starting_time || system_time > end_time) {
// std::cout << "License check error: 04" << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
// std::cout << "Legality Check Success" << std::endl;
// return SERVER_SUCCESS;
//}
//
//// Part 2: Timing check license
//
//ServerError
//LicenseCheck::AlterFile(const std::string &license_file_path,
// const boost::system::error_code &ec,
// boost::asio::deadline_timer *pt) {
//
// ServerError err = LicenseCheck::LegalityCheck(license_file_path);
// if(err!=SERVER_SUCCESS) {
// std::cout << "license file check error" << std::endl;
// exit(1);
// }
//
// std::cout << "---runing---" << std::endl;
// pt->expires_at(pt->expires_at() + boost::posix_time::hours(1));
// pt->async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, pt));
//
// return SERVER_SUCCESS;
//
//}
//
//ServerError
//LicenseCheck::StartCountingDown(const std::string &license_file_path) {
//
// if (!LicenseLibrary::IsFileExistent(license_file_path)) {
// std::cout << "license file not exist" << std::endl;
// exit(1);
// }
//
// //create a thread to run AlterFile
// if(counting_thread_ == nullptr) {
// counting_thread_ = std::make_shared<std::thread>([&]() {
// boost::asio::deadline_timer t(io_service_, boost::posix_time::hours(1));
// t.async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, &t));
// io_service_.run();//this thread will block here
// });
// }
//
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseCheck::StopCountingDown() {
// if(!io_service_.stopped()) {
// io_service_.stop();
// }
//
// if(counting_thread_ != nullptr) {
// counting_thread_->join();
// counting_thread_ = nullptr;
// }
//
// return SERVER_SUCCESS;
//}
//
//}
//}
//}


@ -1,52 +1,52 @@
#pragma once
#include "utils/Error.h"
#include "LicenseLibrary.h"
#include <boost/asio.hpp>
#include <thread>
#include <memory>
namespace zilliz {
namespace milvus {
namespace server {
class LicenseCheck {
private:
LicenseCheck();
~LicenseCheck();
public:
static LicenseCheck &
GetInstance() {
static LicenseCheck instance;
return instance;
};
static ServerError
LegalityCheck(const std::string &license_file_path);
ServerError
StartCountingDown(const std::string &license_file_path);
ServerError
StopCountingDown();
private:
static ServerError
AlterFile(const std::string &license_file_path,
const boost::system::error_code &ec,
boost::asio::deadline_timer *pt);
private:
boost::asio::io_service io_service_;
std::shared_ptr<std::thread> counting_thread_;
};
}
}
}
//#pragma once
//
//#include "utils/Error.h"
//#include "LicenseLibrary.h"
//
//#include <boost/asio.hpp>
//
//#include <thread>
//#include <memory>
//
//namespace zilliz {
//namespace milvus {
//namespace server {
//
//class LicenseCheck {
//private:
// LicenseCheck();
// ~LicenseCheck();
//
//public:
// static LicenseCheck &
// GetInstance() {
// static LicenseCheck instance;
// return instance;
// };
//
// static ServerError
// LegalityCheck(const std::string &license_file_path);
//
// ServerError
// StartCountingDown(const std::string &license_file_path);
//
// ServerError
// StopCountingDown();
//
//private:
// static ServerError
// AlterFile(const std::string &license_file_path,
// const boost::system::error_code &ec,
// boost::asio::deadline_timer *pt);
//
//private:
// boost::asio::io_service io_service_;
// std::shared_ptr<std::thread> counting_thread_;
//
//};
//
//}
//}
//}
//
//


@ -1,86 +1,86 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include <boost/serialization/access.hpp>
#include <string>
#include <map>
class LicenseFile {
public:
LicenseFile() = default;
LicenseFile(const int &device_count,
const std::map<int, std::string> &uuid_encryption_map,
const time_t &starting_time,
const time_t &end_time)
: device_count_(device_count),
uuid_encryption_map_(uuid_encryption_map),
starting_time_(starting_time),
end_time_(end_time) {}
int get_device_count() {
return device_count_;
}
std::map<int, std::string> &get_uuid_encryption_map() {
return uuid_encryption_map_;
}
time_t get_starting_time() {
return starting_time_;
}
time_t get_end_time() {
return end_time_;
}
public:
friend class boost::serialization::access;
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & device_count_;
ar & uuid_encryption_map_;
ar & starting_time_;
ar & end_time_;
}
public:
int device_count_ = 0;
std::map<int, std::string> uuid_encryption_map_;
time_t starting_time_ = 0;
time_t end_time_ = 0;
};
class SerializedLicenseFile {
public:
~SerializedLicenseFile() {
if (license_file_ != nullptr) {
delete (license_file_);
license_file_ = nullptr;
}
}
void
set_license_file(LicenseFile *license_file) {
license_file_ = license_file;
}
LicenseFile *get_license_file() {
return license_file_;
}
private:
friend class boost::serialization::access;
template<typename Archive>
void serialize(Archive &ar, const unsigned int version) {
ar & license_file_;
}
private:
LicenseFile *license_file_ = nullptr;
};
///*******************************************************************************
// * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// * Unauthorized copying of this file, via any medium is strictly prohibited.
// * Proprietary and confidential.
// ******************************************************************************/
//#pragma once
//
//
//#include <boost/serialization/access.hpp>
//#include <string>
//#include <map>
//
//
//class LicenseFile {
// public:
// LicenseFile() = default;
//
// LicenseFile(const int &device_count,
// const std::map<int, std::string> &uuid_encryption_map,
// const time_t &starting_time,
// const time_t &end_time)
// : device_count_(device_count),
// uuid_encryption_map_(uuid_encryption_map),
// starting_time_(starting_time),
// end_time_(end_time) {}
//
// int get_device_count() {
// return device_count_;
// }
// std::map<int, std::string> &get_uuid_encryption_map() {
// return uuid_encryption_map_;
// }
// time_t get_starting_time() {
// return starting_time_;
// }
// time_t get_end_time() {
// return end_time_;
// }
//
// public:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & device_count_;
// ar & uuid_encryption_map_;
// ar & starting_time_;
// ar & end_time_;
// }
//
// public:
// int device_count_ = 0;
// std::map<int, std::string> uuid_encryption_map_;
// time_t starting_time_ = 0;
// time_t end_time_ = 0;
//};
//
//class SerializedLicenseFile {
// public:
// ~SerializedLicenseFile() {
// if (license_file_ != nullptr) {
// delete (license_file_);
// license_file_ = nullptr;
// }
// }
//
// void
// set_license_file(LicenseFile *license_file) {
// license_file_ = license_file;
// }
//
// LicenseFile *get_license_file() {
// return license_file_;
// }
// private:
// friend class boost::serialization::access;
//
// template<typename Archive>
// void serialize(Archive &ar, const unsigned int version) {
// ar & license_file_;
// }
//
// private:
// LicenseFile *license_file_ = nullptr;
//};
//


@ -1,121 +1,121 @@
#include <iostream>
#include <getopt.h>
#include <memory.h>
#include "utils/Log.h"
#include "license/LicenseLibrary.h"
#include "utils/Error.h"
using namespace zilliz::milvus;
// If no path is provided, the current working directory and the default file name system.info are used.
void
print_usage(const std::string &app_name) {
printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
printf(" Options:\n");
printf(" -h --help Print this help\n");
printf(" -s --sysinfo filename sysinfo file location\n");
printf(" -l --license filename Generate license file as given name\n");
printf(" -b --starting time Set start time (format: YYYY-MM-DD)\n");
printf(" -e --end time Set end time (format: YYYY-MM-DD)\n");
printf("\n");
}
int main(int argc, char *argv[]) {
std::string app_name = argv[0];
// if (argc != 1 && argc != 3) {
// print_usage(app_name);
// return EXIT_FAILURE;
//
//#include <iostream>
//#include <getopt.h>
//#include <memory.h>
//
//#include "utils/Log.h"
//#include "license/LicenseLibrary.h"
//#include "utils/Error.h"
//
//
//using namespace zilliz::milvus;
//// Not provide path: current work path will be used and system.info.
//
//void
//print_usage(const std::string &app_name) {
// printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
// printf(" Options:\n");
// printf(" -h --help Print this help\n");
// printf(" -s --sysinfo filename sysinfo file location\n");
// printf(" -l --license filename Generate license file as given name\n");
// printf(" -b --starting time Set start time (format: YYYY-MM-DD)\n");
// printf(" -e --end time Set end time (format: YYYY-MM-DD)\n");
// printf("\n");
//}
//
//int main(int argc, char *argv[]) {
// std::string app_name = argv[0];
//// if (argc != 1 && argc != 3) {
//// print_usage(app_name);
//// return EXIT_FAILURE;
//// }
// static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
// {"license", optional_argument, 0, 'l'},
// {"help", no_argument, 0, 'h'},
// {"starting_time", required_argument, 0, 'b'},
// {"end_time", required_argument, 0, 'e'},
// {NULL, 0, 0, 0}};
// server::ServerError err;
// int value = 0;
// int option_index = 0;
// std::string system_info_filename = "./system.info";
// std::string license_filename = "./system.license";
// char *string_starting_time = NULL;
// char *string_end_time = NULL;
// time_t starting_time = 0;
// time_t end_time = 0;
// int flag_s = 1;
// int flag_b = 1;
// int flag_e = 1;
// while ((value = getopt_long(argc, argv, "hl:s:b:e:", long_options, NULL)) != -1) {
// switch (value) {
// case 's': {
// flag_s = 0;
// system_info_filename = (std::string) (optarg);
// break;
// }
// case 'b': {
// flag_b = 0;
// string_starting_time = optarg;
// break;
// }
// case 'e': {
// flag_e = 0;
// string_end_time = optarg;
// break;
// }
// case 'l': {
// license_filename = (std::string) (optarg);
// break;
// }
// case 'h':print_usage(app_name);
// return EXIT_SUCCESS;
// case '?':print_usage(app_name);
// return EXIT_FAILURE;
// default:print_usage(app_name);
// break;
// }
//
// }
static struct option long_options[] = {{"system_info", required_argument, 0, 's'},
{"license", optional_argument, 0, 'l'},
{"help", no_argument, 0, 'h'},
{"starting_time", required_argument, 0, 'b'},
{"end_time", required_argument, 0, 'e'},
{NULL, 0, 0, 0}};
server::ServerError err;
int value = 0;
int option_index = 0;
std::string system_info_filename = "./system.info";
std::string license_filename = "./system.license";
char *string_starting_time = NULL;
char *string_end_time = NULL;
time_t starting_time = 0;
time_t end_time = 0;
int flag_s = 1;
int flag_b = 1;
int flag_e = 1;
while ((value = getopt_long(argc, argv, "hl:s:b:e:", long_options, NULL)) != -1) {
switch (value) {
case 's': {
flag_s = 0;
system_info_filename = (std::string) (optarg);
break;
}
case 'b': {
flag_b = 0;
string_starting_time = optarg;
break;
}
case 'e': {
flag_e = 0;
string_end_time = optarg;
break;
}
case 'l': {
license_filename = (std::string) (optarg);
break;
}
case 'h':print_usage(app_name);
return EXIT_SUCCESS;
case '?':print_usage(app_name);
return EXIT_FAILURE;
default:print_usage(app_name);
break;
}
}
if (flag_s) {
printf("Error: sysinfo file location must be entered\n");
return 1;
}
if (flag_b) {
printf("Error: start time must be entered\n");
return 1;
}
if (flag_e) {
printf("Error: end time must be entered\n");
return 1;
}
err = server::LicenseLibrary::GetDateTime(string_starting_time, starting_time);
if (err != server::SERVER_SUCCESS) return -1;
err = server::LicenseLibrary::GetDateTime(string_end_time, end_time);
if (err != server::SERVER_SUCCESS) return -1;
int output_info_device_count = 0;
std::map<int, std::string> output_info_uuid_encrption_map;
err = server::LicenseLibrary::GPUinfoFileDeserialization(system_info_filename,
output_info_device_count,
output_info_uuid_encrption_map);
if (err != server::SERVER_SUCCESS) return -1;
err = server::LicenseLibrary::LicenseFileSerialization(license_filename,
output_info_device_count,
output_info_uuid_encrption_map,
starting_time,
end_time);
if (err != server::SERVER_SUCCESS) return -1;
printf("Generate License File Success\n");
return 0;
}
// if (flag_s) {
// printf("Error: sysinfo file location must be entered\n");
// return 1;
// }
// if (flag_b) {
// printf("Error: start time must be entered\n");
// return 1;
// }
// if (flag_e) {
// printf("Error: end time must be entered\n");
// return 1;
// }
//
// err = server::LicenseLibrary::GetDateTime(string_starting_time, starting_time);
// if (err != server::SERVER_SUCCESS) return -1;
//
// err = server::LicenseLibrary::GetDateTime(string_end_time, end_time);
// if (err != server::SERVER_SUCCESS) return -1;
//
//
// int output_info_device_count = 0;
// std::map<int, std::string> output_info_uuid_encrption_map;
//
//
// err = server::LicenseLibrary::GPUinfoFileDeserialization(system_info_filename,
// output_info_device_count,
// output_info_uuid_encrption_map);
// if (err != server::SERVER_SUCCESS) return -1;
//
//
// err = server::LicenseLibrary::LicenseFileSerialization(license_filename,
// output_info_device_count,
// output_info_uuid_encrption_map,
// starting_time,
// end_time);
// if (err != server::SERVER_SUCCESS) return -1;
//
//
// printf("Generate License File Success\n");
//
// return 0;
//}


@ -1,345 +1,345 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include "LicenseLibrary.h"
#include "utils/Log.h"
#include <cuda_runtime.h>
#include <nvml.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
//#include <boost/foreach.hpp>
//#include <boost/serialization/vector.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/serialization/map.hpp>
#include <boost/filesystem/operations.hpp>
namespace zilliz {
namespace milvus {
namespace server {
constexpr int LicenseLibrary::sha256_length_;
// Part 0: File check
bool
LicenseLibrary::IsFileExistent(const std::string &path) {
boost::system::error_code error;
auto file_status = boost::filesystem::status(path, error);
if (error) {
return false;
}
if (!boost::filesystem::exists(file_status)) {
return false;
}
return !boost::filesystem::is_directory(file_status);
}
// Part 1: Get GPU Info
ServerError
LicenseLibrary::GetDeviceCount(int &device_count) {
nvmlReturn_t result = nvmlInit();
if (NVML_SUCCESS != result) {
printf("Failed to initialize NVML: %s\n", nvmlErrorString(result));
return SERVER_UNEXPECTED_ERROR;
}
cudaError_t error_id = cudaGetDeviceCount(&device_count);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int) error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
return SERVER_UNEXPECTED_ERROR;
}
return SERVER_SUCCESS;
}
ServerError
LicenseLibrary::GetUUID(int device_count, std::vector<std::string> &uuid_array) {
if (device_count == 0) {
printf("There are no available device(s) that support CUDA\n");
return SERVER_UNEXPECTED_ERROR;
}
for (int dev = 0; dev < device_count; ++dev) {
nvmlDevice_t device;
nvmlReturn_t result = nvmlDeviceGetHandleByIndex(dev, &device);
if (NVML_SUCCESS != result) {
printf("Failed to get handle for device %i: %s\n", dev, nvmlErrorString(result));
return SERVER_UNEXPECTED_ERROR;
}
char uuid[80];
unsigned int length = 80;
nvmlReturn_t err = nvmlDeviceGetUUID(device, uuid, length);
if (err != NVML_SUCCESS) {
printf("nvmlDeviceGetUUID error: %d\n", err);
return SERVER_UNEXPECTED_ERROR;
}
uuid_array.emplace_back(uuid);
}
return SERVER_SUCCESS;
}
ServerError
LicenseLibrary::GetUUIDMD5(int device_count,
std::vector<std::string> &uuid_array,
std::vector<std::string> &md5_array) {
MD5_CTX ctx;
unsigned char outmd[16];
char temp[3]; //two hex digits plus the terminating NUL
std::string md5;
for (int dev = 0; dev < device_count; ++dev) {
md5.clear();
memset(outmd, 0, sizeof(outmd));
MD5_Init(&ctx);
MD5_Update(&ctx, uuid_array[dev].c_str(), uuid_array[dev].size());
MD5_Final(outmd, &ctx);
for (int i = 0; i < 16; ++i) {
std::snprintf(temp, sizeof(temp), "%02X", outmd[i]);
md5 += temp;
}
md5_array.push_back(md5);
}
return SERVER_SUCCESS;
}
ServerError
LicenseLibrary::GetUUIDSHA256(const int &device_count,
std::vector<std::string> &uuid_array,
std::vector<std::string> &sha_array) {
SHA256_CTX ctx;
unsigned char outmd[sha256_length_];
char temp[3]; //two hex digits plus the terminating NUL
std::string sha;
for (int dev = 0; dev < device_count; ++dev) {
sha.clear();
memset(outmd, 0, sizeof(outmd));
SHA256_Init(&ctx);
SHA256_Update(&ctx, uuid_array[dev].c_str(), uuid_array[dev].size());
SHA256_Final(outmd, &ctx);
for (int i = 0; i < sha256_length_; ++i) {
std::snprintf(temp, sizeof(temp), "%02X", outmd[i]);
sha += temp;
}
sha_array.push_back(sha);
}
return SERVER_SUCCESS;
}
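For reference, a standalone sketch of the hex-encoding loop used above, with a three-byte scratch buffer so that "%02X" (two digits plus the terminating NUL) is not truncated; the UUID value is a placeholder.
#include <openssl/sha.h>
#include <cstdio>
#include <iostream>
#include <string>

int main() {
    const std::string uuid = "GPU-00000000-1111-2222-3333-444444444444";  // placeholder UUID
    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256_CTX ctx;
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, uuid.c_str(), uuid.size());
    SHA256_Final(digest, &ctx);

    std::string hex;
    char temp[3];                                   // two hex digits + terminating NUL
    for (int i = 0; i < SHA256_DIGEST_LENGTH; ++i) {
        std::snprintf(temp, sizeof(temp), "%02X", digest[i]);
        hex += temp;
    }
    std::cout << hex << std::endl;                  // 64 hex characters
    return 0;
}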
ServerError
LicenseLibrary::GetSystemTime(time_t &system_time) {
system_time = time(NULL);
return SERVER_SUCCESS;
}
// Part 2: Handle License File
ServerError
LicenseLibrary::LicenseFileSerialization(const std::string &path,
int device_count,
const std::map<int, std::string> &uuid_encrption_map,
time_t starting_time,
time_t end_time) {
std::ofstream file(path);
boost::archive::binary_oarchive oa(file);
oa.register_type<LicenseFile>();
SerializedLicenseFile serialized_license_file;
serialized_license_file.set_license_file(new LicenseFile(device_count,
uuid_encrption_map,
starting_time,
end_time));
oa << serialized_license_file;
file.close();
return SERVER_SUCCESS;
}
ServerError
LicenseLibrary::LicenseFileDeserialization(const std::string &path,
int &device_count,
std::map<int, std::string> &uuid_encrption_map,
time_t &starting_time,
time_t &end_time) {
if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
std::ifstream file(path);
boost::archive::binary_iarchive ia(file);
ia.register_type<LicenseFile>();
SerializedLicenseFile serialized_license_file;
ia >> serialized_license_file;
device_count = serialized_license_file.get_license_file()->get_device_count();
uuid_encrption_map = serialized_license_file.get_license_file()->get_uuid_encryption_map();
starting_time = serialized_license_file.get_license_file()->get_starting_time();
end_time = serialized_license_file.get_license_file()->get_end_time();
file.close();
return SERVER_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////////////
//// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
//// Unauthorized copying of this file, via any medium is strictly prohibited.
//// Proprietary and confidential.
//////////////////////////////////////////////////////////////////////////////////
//
//#include "LicenseLibrary.h"
//#include "utils/Log.h"
//#include <cuda_runtime.h>
//#include <nvml.h>
//#include <openssl/md5.h>
//#include <openssl/sha.h>
//
//#include <boost/archive/binary_oarchive.hpp>
//#include <boost/archive/binary_iarchive.hpp>
////#include <boost/foreach.hpp>
////#include <boost/serialization/vector.hpp>
//#include <boost/filesystem/path.hpp>
//#include <boost/serialization/map.hpp>
//#include <boost/filesystem/operations.hpp>
//
//
//namespace zilliz {
//namespace milvus {
//namespace server {
//
//constexpr int LicenseLibrary::sha256_length_;
//
//// Part 0: File check
//bool
//LicenseLibrary::IsFileExistent(const std::string &path) {
//
// boost::system::error_code error;
// auto file_status = boost::filesystem::status(path, error);
// if (error) {
// return false;
// }
//
// if (!boost::filesystem::exists(file_status)) {
// return false;
// }
//
// return !boost::filesystem::is_directory(file_status);
//}
//
//// Part 1: Get GPU Info
//ServerError
//LicenseLibrary::SecretFileSerialization(const std::string &path,
// const time_t &update_time,
// const off_t &file_size,
// const time_t &starting_time,
// const time_t &end_time,
// const std::string &file_md5) {
//LicenseLibrary::GetDeviceCount(int &device_count) {
// nvmlReturn_t result = nvmlInit();
// if (NVML_SUCCESS != result) {
// printf("Failed to initialize NVML: %s\n", nvmlErrorString(result));
// return SERVER_UNEXPECTED_ERROR;
// }
// cudaError_t error_id = cudaGetDeviceCount(&device_count);
// if (error_id != cudaSuccess) {
// printf("cudaGetDeviceCount returned %d\n-> %s\n", (int) error_id, cudaGetErrorString(error_id));
// printf("Result = FAIL\n");
// return SERVER_UNEXPECTED_ERROR;
// }
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseLibrary::GetUUID(int device_count, std::vector<std::string> &uuid_array) {
// if (device_count == 0) {
// printf("There are no available device(s) that support CUDA\n");
// return SERVER_UNEXPECTED_ERROR;
// }
//
// for (int dev = 0; dev < device_count; ++dev) {
// nvmlDevice_t device;
// nvmlReturn_t result = nvmlDeviceGetHandleByIndex(dev, &device);
// if (NVML_SUCCESS != result) {
// printf("Failed to get handle for device %i: %s\n", dev, nvmlErrorString(result));
// return SERVER_UNEXPECTED_ERROR;
// }
//
// char uuid[80];
// unsigned int length = 80;
// nvmlReturn_t err = nvmlDeviceGetUUID(device, uuid, length);
// if (err != NVML_SUCCESS) {
// printf("nvmlDeviceGetUUID error: %d\n", err);
// return SERVER_UNEXPECTED_ERROR;
// }
//
// uuid_array.emplace_back(uuid);
// }
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseLibrary::GetUUIDMD5(int device_count,
// std::vector<std::string> &uuid_array,
// std::vector<std::string> &md5_array) {
// MD5_CTX ctx;
// unsigned char outmd[16];
// char temp[2];
// std::string md5;
// for (int dev = 0; dev < device_count; ++dev) {
// md5.clear();
// memset(outmd, 0, sizeof(outmd));
// MD5_Init(&ctx);
// MD5_Update(&ctx, uuid_array[dev].c_str(), uuid_array[dev].size());
// MD5_Final(outmd, &ctx);
// for (int i = 0; i < 16; ++i) {
// std::snprintf(temp, 2, "%02X", outmd[i]);
// md5 += temp;
// }
// md5_array.push_back(md5);
// }
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseLibrary::GetUUIDSHA256(const int &device_count,
// std::vector<std::string> &uuid_array,
// std::vector<std::string> &sha_array) {
// SHA256_CTX ctx;
// unsigned char outmd[sha256_length_];
// char temp[2];
// std::string sha;
// for (int dev = 0; dev < device_count; ++dev) {
// sha.clear();
// memset(outmd, 0, sizeof(outmd));
// SHA256_Init(&ctx);
// SHA256_Update(&ctx, uuid_array[dev].c_str(), uuid_array[dev].size());
// SHA256_Final(outmd, &ctx);
// for (int i = 0; i < sha256_length_; ++i) {
// std::snprintf(temp, 2, "%02X", outmd[i]);
// sha += temp;
// }
// sha_array.push_back(sha);
// }
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseLibrary::GetSystemTime(time_t &system_time) {
// system_time = time(NULL);
// return SERVER_SUCCESS;
//}
//
//// Part 2: Handle License File
//ServerError
//LicenseLibrary::LicenseFileSerialization(const std::string &path,
// int device_count,
// const std::map<int, std::string> &uuid_encrption_map,
// time_t starting_time,
// time_t end_time) {
//
// std::ofstream file(path);
// boost::archive::binary_oarchive oa(file);
// oa.register_type<SecretFile>();
// oa.register_type<LicenseFile>();
//
// SerializedSecretFile serialized_secret_file;
// SerializedLicenseFile serialized_license_file;
//
// serialized_secret_file.set_secret_file(new SecretFile(update_time, file_size, starting_time, end_time, file_md5));
// oa << serialized_secret_file;
// serialized_license_file.set_license_file(new LicenseFile(device_count,
// uuid_encrption_map,
// starting_time,
// end_time));
// oa << serialized_license_file;
//
// file.close();
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseLibrary::SecretFileDeserialization(const std::string &path,
// time_t &update_time,
// off_t &file_size,
// time_t &starting_time,
// time_t &end_time,
// std::string &file_md5) {
//LicenseLibrary::LicenseFileDeserialization(const std::string &path,
// int &device_count,
// std::map<int, std::string> &uuid_encrption_map,
// time_t &starting_time,
// time_t &end_time) {
// if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
// std::ifstream file(path);
// boost::archive::binary_iarchive ia(file);
// ia.register_type<LicenseFile>();
//
// SerializedLicenseFile serialized_license_file;
// ia >> serialized_license_file;
//
// device_count = serialized_license_file.get_license_file()->get_device_count();
// uuid_encrption_map = serialized_license_file.get_license_file()->get_uuid_encryption_map();
// starting_time = serialized_license_file.get_license_file()->get_starting_time();
// end_time = serialized_license_file.get_license_file()->get_end_time();
//
// file.close();
// return SERVER_SUCCESS;
//}
//
////ServerError
////LicenseLibrary::SecretFileSerialization(const std::string &path,
//// const time_t &update_time,
//// const off_t &file_size,
//// const time_t &starting_time,
//// const time_t &end_time,
//// const std::string &file_md5) {
//// std::ofstream file(path);
//// boost::archive::binary_oarchive oa(file);
//// oa.register_type<SecretFile>();
////
//// SerializedSecretFile serialized_secret_file;
////
//// serialized_secret_file.set_secret_file(new SecretFile(update_time, file_size, starting_time, end_time, file_md5));
//// oa << serialized_secret_file;
////
//// file.close();
//// return SERVER_SUCCESS;
////}
////
////ServerError
////LicenseLibrary::SecretFileDeserialization(const std::string &path,
//// time_t &update_time,
//// off_t &file_size,
//// time_t &starting_time,
//// time_t &end_time,
//// std::string &file_md5) {
//// if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
////
//// std::ifstream file(path);
//// boost::archive::binary_iarchive ia(file);
//// ia.register_type<SecretFile>();
//// SerializedSecretFile serialized_secret_file;
////
//// ia >> serialized_secret_file;
//// update_time = serialized_secret_file.get_secret_file()->get_update_time();
//// file_size = serialized_secret_file.get_secret_file()->get_file_size();
//// starting_time = serialized_secret_file.get_secret_file()->get_starting_time();
//// end_time = serialized_secret_file.get_secret_file()->get_end_time();
//// file_md5 = serialized_secret_file.get_secret_file()->get_file_md5();
//// file.close();
//// return SERVER_SUCCESS;
////}
//
//
//
//// Part 3: File attribute: UpdateTime Time/ Size/ MD5
//ServerError
//LicenseLibrary::GetFileUpdateTimeAndSize(const std::string &path, time_t &update_time, off_t &file_size) {
//
// if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
//
// struct stat buf;
// int err_no = stat(path.c_str(), &buf);
// if (err_no != 0) {
// std::cout << strerror(err_no) << std::endl;
// return SERVER_UNEXPECTED_ERROR;
// }
//
// update_time = buf.st_mtime;
// file_size = buf.st_size;
//
// return SERVER_SUCCESS;
//}
//
//ServerError
//LicenseLibrary::GetFileMD5(const std::string &path, std::string &filemd5) {
//
// if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
//
// filemd5.clear();
//
// std::ifstream file(path.c_str(), std::ifstream::binary);
// if (!file) {
// return -1;
// }
//
// MD5_CTX md5Context;
// MD5_Init(&md5Context);
//
// char buf[1024 * 16];
// while (file.good()) {
// file.read(buf, sizeof(buf));
// MD5_Update(&md5Context, buf, file.gcount());
// }
//
// unsigned char result[MD5_DIGEST_LENGTH];
// MD5_Final(result, &md5Context);
//
// char hex[35];
// memset(hex, 0, sizeof(hex));
// for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
// sprintf(hex + i * 2, "%02X", result[i]);
// }
// hex[32] = '\0';
// filemd5 = std::string(hex);
//
// return SERVER_SUCCESS;
//}
//// Part 4: GPU Info File Serialization/Deserialization
//ServerError
//LicenseLibrary::GPUinfoFileSerialization(const std::string &path,
// int device_count,
// const std::map<int, std::string> &uuid_encrption_map) {
// std::ofstream file(path);
// boost::archive::binary_oarchive oa(file);
// oa.register_type<GPUInfoFile>();
//
// SerializedGPUInfoFile serialized_gpu_info_file;
//
// serialized_gpu_info_file.set_gpu_info_file(new GPUInfoFile(device_count, uuid_encrption_map));
// oa << serialized_gpu_info_file;
//
// file.close();
// return SERVER_SUCCESS;
//}
//ServerError
//LicenseLibrary::GPUinfoFileDeserialization(const std::string &path,
// int &device_count,
// std::map<int, std::string> &uuid_encrption_map) {
// if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
//
// std::ifstream file(path);
// boost::archive::binary_iarchive ia(file);
// ia.register_type<SecretFile>();
// SerializedSecretFile serialized_secret_file;
// ia.register_type<GPUInfoFile>();
//
// SerializedGPUInfoFile serialized_gpu_info_file;
// ia >> serialized_gpu_info_file;
//
// device_count = serialized_gpu_info_file.get_gpu_info_file()->get_device_count();
// uuid_encrption_map = serialized_gpu_info_file.get_gpu_info_file()->get_uuid_encryption_map();
//
// ia >> serialized_secret_file;
// update_time = serialized_secret_file.get_secret_file()->get_update_time();
// file_size = serialized_secret_file.get_secret_file()->get_file_size();
// starting_time = serialized_secret_file.get_secret_file()->get_starting_time();
// end_time = serialized_secret_file.get_secret_file()->get_end_time();
// file_md5 = serialized_secret_file.get_secret_file()->get_file_md5();
// file.close();
// return SERVER_SUCCESS;
//}
// Part 3: File attribute: UpdateTime Time/ Size/ MD5
ServerError
LicenseLibrary::GetFileUpdateTimeAndSize(const std::string &path, time_t &update_time, off_t &file_size) {
if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
struct stat buf;
int err_no = stat(path.c_str(), &buf);
if (err_no != 0) {
std::cout << strerror(errno) << std::endl;
return SERVER_UNEXPECTED_ERROR;
}
update_time = buf.st_mtime;
file_size = buf.st_size;
return SERVER_SUCCESS;
}
ServerError
LicenseLibrary::GetFileMD5(const std::string &path, std::string &filemd5) {
if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
filemd5.clear();
std::ifstream file(path.c_str(), std::ifstream::binary);
if (!file) {
return SERVER_UNEXPECTED_ERROR;
}
MD5_CTX md5Context;
MD5_Init(&md5Context);
char buf[1024 * 16];
while (file.good()) {
file.read(buf, sizeof(buf));
MD5_Update(&md5Context, buf, file.gcount());
}
unsigned char result[MD5_DIGEST_LENGTH];
MD5_Final(result, &md5Context);
char hex[35];
memset(hex, 0, sizeof(hex));
for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
sprintf(hex + i * 2, "%02X", result[i]);
}
hex[32] = '\0';
filemd5 = std::string(hex);
return SERVER_SUCCESS;
}
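// Illustrative Part 3 usage (hypothetical caller and path, not part of this file):
//   time_t update_time = 0; off_t size = 0; std::string md5;
//   LicenseLibrary::GetFileUpdateTimeAndSize("/tmp/system.license", update_time, size);
//   LicenseLibrary::GetFileMD5("/tmp/system.license", md5);   // md5 holds the uppercase hex digest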
// Part 4: GPU Info File Serialization/Deserialization
ServerError
LicenseLibrary::GPUinfoFileSerialization(const std::string &path,
int device_count,
const std::map<int, std::string> &uuid_encrption_map) {
std::ofstream file(path);
boost::archive::binary_oarchive oa(file);
oa.register_type<GPUInfoFile>();
SerializedGPUInfoFile serialized_gpu_info_file;
serialized_gpu_info_file.set_gpu_info_file(new GPUInfoFile(device_count, uuid_encrption_map));
oa << serialized_gpu_info_file;
file.close();
return SERVER_SUCCESS;
}
ServerError
LicenseLibrary::GPUinfoFileDeserialization(const std::string &path,
int &device_count,
std::map<int, std::string> &uuid_encrption_map) {
if (!IsFileExistent(path)) return SERVER_LICENSE_FILE_NOT_EXIST;
std::ifstream file(path);
boost::archive::binary_iarchive ia(file);
ia.register_type<GPUInfoFile>();
SerializedGPUInfoFile serialized_gpu_info_file;
ia >> serialized_gpu_info_file;
device_count = serialized_gpu_info_file.get_gpu_info_file()->get_device_count();
uuid_encrption_map = serialized_gpu_info_file.get_gpu_info_file()->get_uuid_encryption_map();
file.close();
return SERVER_SUCCESS;
}
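// Round-trip sketch for Part 4 (hypothetical values, not part of this file):
// serialize a GPU info file, then read it back and expect the same content.
//   std::map<int, std::string> uuid_sha = {{0, "sha256-of-gpu0-uuid"}};
//   LicenseLibrary::GPUinfoFileSerialization("/tmp/gpu.info", 1, uuid_sha);
//   int count = 0; std::map<int, std::string> loaded;
//   LicenseLibrary::GPUinfoFileDeserialization("/tmp/gpu.info", count, loaded);
//   // on success: count == 1 and loaded == uuid_sha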
ServerError
LicenseLibrary::GetDateTime(const char *cha, time_t &data_time) {
tm tm_;
int year, month, day;
sscanf(cha, "%d-%d-%d", &year, &month, &day);
tm_.tm_year = year - 1900;
tm_.tm_mon = month - 1;
tm_.tm_mday = day;
tm_.tm_hour = 0;
tm_.tm_min = 0;
tm_.tm_sec = 0;
tm_.tm_isdst = 0;
data_time = mktime(&tm_);
return SERVER_SUCCESS;
}
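// Example (illustrative only): GetDateTime parses a "YYYY-MM-DD" string into a time_t.
//   time_t t = 0;
//   LicenseLibrary::GetDateTime("2019-07-01", t);   // t corresponds to 2019-07-01 00:00:00 (mktime, DST flag forced to 0)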
}
}
}
//
//ServerError
//LicenseLibrary::GetDateTime(const char *cha, time_t &data_time) {
// tm tm_;
// int year, month, day;
// sscanf(cha, "%d-%d-%d", &year, &month, &day);
// tm_.tm_year = year - 1900;
// tm_.tm_mon = month - 1;
// tm_.tm_mday = day;
// tm_.tm_hour = 0;
// tm_.tm_min = 0;
// tm_.tm_sec = 0;
// tm_.tm_isdst = 0;
// data_time = mktime(&tm_);
// return SERVER_SUCCESS;
//
//}
//
//}
//}
//}

View File

@ -1,105 +1,105 @@
#pragma once
#include "LicenseFile.h"
#include "GPUInfoFile.h"
#include "utils/Error.h"
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <vector>
#include <map>
#include <time.h>
namespace zilliz {
namespace milvus {
namespace server {
class LicenseLibrary {
public:
// Part 0: File check
static bool
IsFileExistent(const std::string &path);
// Part 1: Get GPU Info
static ServerError
GetDeviceCount(int &device_count);
static ServerError
GetUUID(int device_count, std::vector<std::string> &uuid_array);
static ServerError
GetUUIDMD5(int device_count, std::vector<std::string> &uuid_array, std::vector<std::string> &md5_array);
static ServerError
GetUUIDSHA256(const int &device_count,
std::vector<std::string> &uuid_array,
std::vector<std::string> &sha_array);
static ServerError
GetSystemTime(time_t &system_time);
// Part 2: Handle License File
static ServerError
LicenseFileSerialization(const std::string &path,
int device_count,
const std::map<int, std::string> &uuid_encrption_map,
time_t starting_time,
time_t end_time);
static ServerError
LicenseFileDeserialization(const std::string &path,
int &device_count,
std::map<int, std::string> &uuid_encrption_map,
time_t &starting_time,
time_t &end_time);
//#pragma once
//
//#include "LicenseFile.h"
//#include "GPUInfoFile.h"
//
//#include "utils/Error.h"
//
//#include <boost/asio.hpp>
//#include <boost/thread.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>
//
//#include <vector>
//#include <map>
//#include <time.h>
//
//
//namespace zilliz {
//namespace milvus {
//namespace server {
//
//class LicenseLibrary {
// public:
// // Part 0: File check
// static bool
// IsFileExistent(const std::string &path);
//
// // Part 1: Get GPU Info
// static ServerError
// SecretFileSerialization(const std::string &path,
// const time_t &update_time,
// const off_t &file_size,
// const time_t &starting_time,
// const time_t &end_time,
// const std::string &file_md5);
// GetDeviceCount(int &device_count);
//
// static ServerError
// SecretFileDeserialization(const std::string &path,
// time_t &update_time,
// off_t &file_size,
// time_t &starting_time,
// time_t &end_time,
// std::string &file_md5);
// Part 3: File attribute: UpdateTime Time/ Size/ MD5
static ServerError
GetFileUpdateTimeAndSize(const std::string &path, time_t &update_time, off_t &file_size);
static ServerError
GetFileMD5(const std::string &path, std::string &filemd5);
// Part 4: GPU Info File Serialization/Deserialization
static ServerError
GPUinfoFileSerialization(const std::string &path,
int device_count,
const std::map<int, std::string> &uuid_encrption_map);
static ServerError
GPUinfoFileDeserialization(const std::string &path,
int &device_count,
std::map<int, std::string> &uuid_encrption_map);
static ServerError
GetDateTime(const char *cha, time_t &data_time);
private:
static constexpr int sha256_length_ = 32;
};
}
}
}
// GetUUID(int device_count, std::vector<std::string> &uuid_array);
//
// static ServerError
// GetUUIDMD5(int device_count, std::vector<std::string> &uuid_array, std::vector<std::string> &md5_array);
//
//
// static ServerError
// GetUUIDSHA256(const int &device_count,
// std::vector<std::string> &uuid_array,
// std::vector<std::string> &sha_array);
//
// static ServerError
// GetSystemTime(time_t &system_time);
//
// // Part 2: Handle License File
// static ServerError
// LicenseFileSerialization(const std::string &path,
// int device_count,
// const std::map<int, std::string> &uuid_encrption_map,
// time_t starting_time,
// time_t end_time);
//
// static ServerError
// LicenseFileDeserialization(const std::string &path,
// int &device_count,
// std::map<int, std::string> &uuid_encrption_map,
// time_t &starting_time,
// time_t &end_time);
//
//// static ServerError
//// SecretFileSerialization(const std::string &path,
//// const time_t &update_time,
//// const off_t &file_size,
//// const time_t &starting_time,
//// const time_t &end_time,
//// const std::string &file_md5);
////
//// static ServerError
//// SecretFileDeserialization(const std::string &path,
//// time_t &update_time,
//// off_t &file_size,
//// time_t &starting_time,
//// time_t &end_time,
//// std::string &file_md5);
//
// // Part 3: File attribute: UpdateTime Time/ Size/ MD5
// static ServerError
// GetFileUpdateTimeAndSize(const std::string &path, time_t &update_time, off_t &file_size);
//
// static ServerError
// GetFileMD5(const std::string &path, std::string &filemd5);
//
// // Part 4: GPU Info File Serialization/Deserialization
// static ServerError
// GPUinfoFileSerialization(const std::string &path,
// int device_count,
// const std::map<int, std::string> &uuid_encrption_map);
// static ServerError
// GPUinfoFileDeserialization(const std::string &path,
// int &device_count,
// std::map<int, std::string> &uuid_encrption_map);
//
// static ServerError
// GetDateTime(const char *cha, time_t &data_time);
//
//
// private:
// static constexpr int sha256_length_ = 32;
//};
//
//
//}
//}
//}

View File

@ -22,40 +22,21 @@ class MetricsBase{
}
virtual ServerError Init() { return SERVER_SUCCESS; };
virtual void AddGroupSuccessTotalIncrement(double value = 1) {};
virtual void AddGroupFailTotalIncrement(double value = 1) {};
virtual void HasGroupSuccessTotalIncrement(double value = 1) {};
virtual void HasGroupFailTotalIncrement(double value = 1) {};
virtual void GetGroupSuccessTotalIncrement(double value = 1) {};
virtual void GetGroupFailTotalIncrement(double value = 1) {};
virtual void GetGroupFilesSuccessTotalIncrement(double value = 1) {};
virtual void GetGroupFilesFailTotalIncrement(double value = 1) {};
virtual void AddVectorsSuccessTotalIncrement(double value = 1) {};
virtual void AddVectorsFailTotalIncrement(double value = 1) {};
virtual void AddVectorsDurationHistogramOberve(double value) {};
virtual void SearchSuccessTotalIncrement(double value = 1) {};
virtual void SearchFailTotalIncrement(double value = 1) {};
virtual void SearchDurationHistogramObserve(double value) {};
virtual void RawFileSizeHistogramObserve(double value) {};
virtual void IndexFileSizeHistogramObserve(double value) {};
virtual void BuildIndexDurationSecondsHistogramObserve(double value) {};
virtual void AllBuildIndexDurationSecondsHistogramObserve(double value) {};
virtual void CacheUsageGaugeIncrement(double value = 1) {};
virtual void CacheUsageGaugeDecrement(double value = 1) {};
virtual void CacheUsageGaugeSet(double value) {};
virtual void MetaVisitTotalIncrement(double value = 1) {};
virtual void MetaVisitDurationSecondsHistogramObserve(double value) {};
virtual void MemUsagePercentGaugeSet(double value) {};
virtual void MemUsagePercentGaugeIncrement(double value = 1) {};
virtual void MemUsagePercentGaugeDecrement(double value = 1) {};
virtual void MemUsageTotalGaugeSet(double value) {};
virtual void MemUsageTotalGaugeIncrement(double value = 1) {};
virtual void MemUsageTotalGaugeDecrement(double value = 1) {};
virtual void MetaAccessTotalIncrement(double value = 1) {};
virtual void MetaAccessDurationSecondsHistogramObserve(double value) {};
virtual void FaissDiskLoadDurationSecondsHistogramObserve(double value) {};
virtual void FaissDiskLoadSizeBytesHistogramObserve(double value) {};
virtual void FaissDiskLoadIOSpeedHistogramObserve(double value) {};
virtual void CacheAccessTotalIncrement(double value = 1) {};
virtual void MemTableMergeDurationSecondsHistogramObserve(double value) {};
virtual void SearchIndexDataDurationSecondsHistogramObserve(double value) {};

View File

@ -17,7 +17,8 @@ ServerError
PrometheusMetrics::Init() {
try {
ConfigNode &configNode = ServerConfig::GetInstance().GetConfig(CONFIG_METRIC);
startup_ = configNode.GetValue(CONFIG_METRIC_IS_STARTUP) == "true" ? true : false;
startup_ = configNode.GetValue(CONFIG_METRIC_IS_STARTUP) == "on";
if(!startup_) return SERVER_SUCCESS;
// Following should be read from config file.
const std::string bind_address = configNode.GetChild(CONFIG_PROMETHEUS).GetValue(CONFIG_METRIC_PROMETHEUS_PORT);
const std::string uri = std::string("/metrics");
@ -59,9 +60,6 @@ PrometheusMetrics::GPUPercentGaugeSet() {
if(!startup_) return;
int numDevide = server::SystemInfo::GetInstance().num_device();
std::vector<unsigned int> values = server::SystemInfo::GetInstance().GPUPercent();
// for (int i = 0; i < numDevide; ++i) {
// GPU_percent_gauges_[i].Set(static_cast<double>(values[i]));
// }
if(numDevide >= 1) GPU0_percent_gauge_.Set(static_cast<double>(values[0]));
if(numDevide >= 2) GPU1_percent_gauge_.Set(static_cast<double>(values[1]));
if(numDevide >= 3) GPU2_percent_gauge_.Set(static_cast<double>(values[2]));
@ -76,13 +74,10 @@ PrometheusMetrics::GPUPercentGaugeSet() {
void PrometheusMetrics::GPUMemoryUsageGaugeSet() {
if(!startup_) return;
int numDevide = server::SystemInfo::GetInstance().num_device();
std::vector<unsigned long long> values = server::SystemInfo::GetInstance().GPUMemoryUsed();
constexpr unsigned long long MtoB = 1024*1024;
int numDevice = values.size();
// for (int i = 0; i < numDevice; ++i) {
// GPU_memory_usage_gauges_[i].Set(values[i]/MtoB);
// }
if(numDevice >=1) GPU0_memory_usage_gauge_.Set(values[0]/MtoB);
if(numDevice >=2) GPU1_memory_usage_gauge_.Set(values[1]/MtoB);
if(numDevice >=3) GPU2_memory_usage_gauge_.Set(values[2]/MtoB);
@ -145,21 +140,6 @@ void PrometheusMetrics::OctetsSet() {
outoctets_gauge_.Set((in_and_out_octets.second-old_outoctets)/total_second);
}
//void PrometheusMetrics::GpuPercentInit() {
// int num_device = SystemInfo::GetInstance().num_device();
// constexpr char device_number[] = "DeviceNum";
// for(int i = 0; i < num_device; ++ i) {
// GPU_percent_gauges_.emplace_back(GPU_percent_.Add({{device_number,std::to_string(i)}}));
// }
//
//}
//void PrometheusMetrics::GpuMemoryInit() {
// int num_device = SystemInfo::GetInstance().num_device();
// constexpr char device_number[] = "DeviceNum";
// for(int i = 0; i < num_device; ++ i) {
// GPU_memory_usage_gauges_.emplace_back(GPU_memory_usage_.Add({{device_number,std::to_string(i)}}));
// }
//}
}

View File

@ -34,10 +34,6 @@ class PrometheusMetrics: public MetricsBase {
public:
static PrometheusMetrics &
GetInstance() {
// switch(MetricCollectorType) {
// case: prometheus::
// static
// }
static PrometheusMetrics instance;
return instance;
}
@ -49,46 +45,21 @@ class PrometheusMetrics: public MetricsBase {
std::shared_ptr<prometheus::Exposer> exposer_ptr_;
std::shared_ptr<prometheus::Registry> registry_ = std::make_shared<prometheus::Registry>();
bool startup_ = false;
// void GpuPercentInit();
// void GpuMemoryInit();
public:
void AddGroupSuccessTotalIncrement(double value = 1.0) override { if(startup_) add_group_success_total_.Increment(value);};
void AddGroupFailTotalIncrement(double value = 1.0) override { if(startup_) add_group_fail_total_.Increment(value);};
void HasGroupSuccessTotalIncrement(double value = 1.0) override { if(startup_) has_group_success_total_.Increment(value);};
void HasGroupFailTotalIncrement(double value = 1.0) override { if(startup_) has_group_fail_total_.Increment(value);};
void GetGroupSuccessTotalIncrement(double value = 1.0) override { if(startup_) get_group_success_total_.Increment(value);};
void GetGroupFailTotalIncrement(double value = 1.0) override { if(startup_) get_group_fail_total_.Increment(value);};
void GetGroupFilesSuccessTotalIncrement(double value = 1.0) override { if(startup_) get_group_files_success_total_.Increment(value);};
void GetGroupFilesFailTotalIncrement(double value = 1.0) override { if(startup_) get_group_files_fail_total_.Increment(value);};
void SetStartup(bool startup) {startup_ = startup;};
void AddVectorsSuccessTotalIncrement(double value = 1.0) override { if(startup_) add_vectors_success_total_.Increment(value);};
void AddVectorsFailTotalIncrement(double value = 1.0) override { if(startup_) add_vectors_fail_total_.Increment(value);};
void AddVectorsDurationHistogramOberve(double value) override { if(startup_) add_vectors_duration_histogram_.Observe(value);};
void SearchSuccessTotalIncrement(double value = 1.0) override { if(startup_) search_success_total_.Increment(value);};
void SearchFailTotalIncrement(double value = 1.0) override { if(startup_) search_fail_total_.Increment(value); };
void SearchDurationHistogramObserve(double value) override { if(startup_) search_duration_histogram_.Observe(value);};
void RawFileSizeHistogramObserve(double value) override { if(startup_) raw_files_size_histogram_.Observe(value);};
void IndexFileSizeHistogramObserve(double value) override { if(startup_) index_files_size_histogram_.Observe(value);};
void BuildIndexDurationSecondsHistogramObserve(double value) override { if(startup_) build_index_duration_seconds_histogram_.Observe(value);};
void AllBuildIndexDurationSecondsHistogramObserve(double value) override { if(startup_) all_build_index_duration_seconds_histogram_.Observe(value);};
void CacheUsageGaugeIncrement(double value = 1.0) override { if(startup_) cache_usage_gauge_.Increment(value);};
void CacheUsageGaugeDecrement(double value = 1.0) override { if(startup_) cache_usage_gauge_.Decrement(value);};
void CacheUsageGaugeSet(double value) override { if(startup_) cache_usage_gauge_.Set(value);};
// void MetaVisitTotalIncrement(double value = 1.0) override { meta_visit_total_.Increment(value);};
// void MetaVisitDurationSecondsHistogramObserve(double value) override { meta_visit_duration_seconds_histogram_.Observe(value);};
void MemUsagePercentGaugeSet(double value) override { if(startup_) mem_usage_percent_gauge_.Set(value);};
void MemUsagePercentGaugeIncrement(double value = 1.0) override { if(startup_) mem_usage_percent_gauge_.Increment(value);};
void MemUsagePercentGaugeDecrement(double value = 1.0) override { if(startup_) mem_usage_percent_gauge_.Decrement(value);};
void MemUsageTotalGaugeSet(double value) override { if(startup_) mem_usage_total_gauge_.Set(value);};
void MemUsageTotalGaugeIncrement(double value = 1.0) override { if(startup_) mem_usage_total_gauge_.Increment(value);};
void MemUsageTotalGaugeDecrement(double value = 1.0) override { if(startup_) mem_usage_total_gauge_.Decrement(value);};
void MetaAccessTotalIncrement(double value = 1) override { if(startup_) meta_access_total_.Increment(value);};
void MetaAccessDurationSecondsHistogramObserve(double value) override { if(startup_) meta_access_duration_seconds_histogram_.Observe(value);};
void FaissDiskLoadDurationSecondsHistogramObserve(double value) override { if(startup_) faiss_disk_load_duration_seconds_histogram_.Observe(value);};
void FaissDiskLoadSizeBytesHistogramObserve(double value) override { if(startup_) faiss_disk_load_size_bytes_histogram_.Observe(value);};
// void FaissDiskLoadIOSpeedHistogramObserve(double value) { if(startup_) faiss_disk_load_IO_speed_histogram_.Observe(value);};
void FaissDiskLoadIOSpeedGaugeSet(double value) override { if(startup_) faiss_disk_load_IO_speed_gauge_.Set(value);};
void CacheAccessTotalIncrement(double value = 1) override { if(startup_) cache_access_total_.Increment(value);};

View File

@ -135,46 +135,6 @@ SystemInfo::CPUPercent() {
return percent;
}
//std::unordered_map<int,std::vector<double>>
//SystemInfo::GetGPUMemPercent(){
// // return GPUID: MEM%
//
// //write GPU info to a file
// system("nvidia-smi pmon -c 1 > GPUInfo.txt");
// int pid = (int)getpid();
//
// //parse line
// std::ifstream read_file;
// read_file.open("GPUInfo.txt");
// std::string line;
// while(getline(read_file, line)){
// std::vector<std::string> words = split(line);
// // 0 1 2 3 4 5 6 7
// //words stand for gpuindex, pid, type, sm, mem, enc, dec, command respectively
// if(std::stoi(words[1]) != pid) continue;
// int GPUindex = std::stoi(words[0]);
// double sm_percent = std::stod(words[3]);
// double mem_percent = std::stod(words[4]);
//
// }
//
//}
//std::vector<std::string>
//SystemInfo::split(std::string input) {
// std::vector<std::string> words;
// input += " ";
// int word_start = 0;
// for (int i = 0; i < input.size(); ++i) {
// if(input[i] != ' ') continue;
// if(input[i] == ' ') {
// word_start = i + 1;
// continue;
// }
// words.push_back(input.substr(word_start,i-word_start));
// }
// return words;
//}
std::vector<unsigned int>
SystemInfo::GPUPercent() {

View File

@ -59,8 +59,6 @@ class SystemInfo {
double MemoryPercent();
double CPUPercent();
std::pair<unsigned long long , unsigned long long > Octets();
// std::unordered_map<int,std::vector<double>> GetGPUMemPercent() {};
// std::vector<std::string> split(std::string input) {};
std::vector<unsigned int> GPUPercent();
std::vector<unsigned long long> GPUMemoryUsed();

View File

@ -32,4 +32,4 @@ target_link_libraries(milvus_sdk
add_subdirectory(examples)
install(TARGETS milvus_sdk DESTINATION bin)
install(TARGETS milvus_sdk DESTINATION lib)

View File

@ -56,7 +56,7 @@ namespace {
<< std::to_string(result.query_result_arrays.size())
<< " search result:" << std::endl;
for(auto& item : result.query_result_arrays) {
std::cout << "\t" << std::to_string(item.id) << "\tscore:" << std::to_string(item.score);
std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance);
std::cout << std::endl;
}
}
@ -165,6 +165,11 @@ ClientTest::Test(const std::string& address, const std::string& port) {
Status stat = conn->CreateTable(tb_schema);
std::cout << "CreateTable function call status: " << stat.ToString() << std::endl;
PrintTableSchema(tb_schema);
bool has_table = conn->HasTable(tb_schema.table_name);
if(has_table) {
std::cout << "Table is created" << std::endl;
}
}
{//describe table

View File

@ -59,7 +59,7 @@ struct RowRecord {
*/
struct QueryResult {
int64_t id; ///< Output result
double score; ///< Vector similarity score: 0 ~ 100
double distance; ///< Vector similarity distance
};
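// Illustrative read of search results (assumes `topk` is the result object returned by
// SearchVector, which exposes `query_result_arrays` as in the example client above):
//   for (const QueryResult& r : topk.query_result_arrays) {
//       std::cout << r.id << "  distance=" << r.distance << std::endl;
//   }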
/**
@ -156,6 +156,18 @@ public:
virtual Status CreateTable(const TableSchema &param) = 0;
/**
* @brief Test table existence method
*
* This method is used to test whether a table exists.
*
* @param table_name, name of the table to be tested.
*
* @return Indicate whether the table exists.
*/
virtual bool HasTable(const std::string &table_name) = 0;
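// Minimal usage sketch (assumes `conn` is an already-connected Connection pointer
// obtained the same way the SDK example client creates it):
//   if (!conn->HasTable("example_table")) {
//       // table is missing (or the client is not connected) - create it first
//   }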
/**
* @brief Delete table method
*

View File

@ -4,7 +4,6 @@
* Proprietary and confidential.
******************************************************************************/
#include "ClientProxy.h"
#include "util/ConvertUtil.h"
namespace milvus {
@ -102,6 +101,15 @@ ClientProxy::CreateTable(const TableSchema &param) {
return Status::OK();
}
bool
ClientProxy::HasTable(const std::string &table_name) {
if(!IsConnected()) {
return false;
}
return ClientPtr()->interface()->HasTable(table_name);
}
Status
ClientProxy::DeleteTable(const std::string &table_name) {
if(!IsConnected()) {
@ -195,7 +203,7 @@ ClientProxy::SearchVector(const std::string &table_name,
for(auto& thrift_query_result : thrift_topk_result.query_result_arrays) {
QueryResult query_result;
query_result.id = thrift_query_result.id;
query_result.score = thrift_query_result.score;
query_result.distance = thrift_query_result.distance;
result.query_result_arrays.emplace_back(query_result);
}

View File

@ -23,6 +23,8 @@ public:
virtual Status CreateTable(const TableSchema &param) override;
virtual bool HasTable(const std::string &table_name) override;
virtual Status DeleteTable(const std::string &table_name) override;
virtual Status AddVector(const std::string &table_name,

View File

@ -56,6 +56,11 @@ ConnectionImpl::CreateTable(const TableSchema &param) {
return client_proxy_->CreateTable(param);
}
bool
ConnectionImpl::HasTable(const std::string &table_name) {
return client_proxy_->HasTable(table_name);
}
Status
ConnectionImpl::DeleteTable(const std::string &table_name) {
return client_proxy_->DeleteTable(table_name);

View File

@ -25,6 +25,8 @@ public:
virtual Status CreateTable(const TableSchema &param) override;
virtual bool HasTable(const std::string &table_name) override;
virtual Status DeleteTable(const std::string &table_name) override;
virtual Status AddVector(const std::string &table_name,

View File

@ -1,44 +0,0 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#include "ConvertUtil.h"
#include "Exception.h"
#include <map>
namespace milvus {
static const std::string INDEX_RAW = "raw";
static const std::string INDEX_IVFFLAT = "ivfflat";
std::string ConvertUtil::IndexType2Str(IndexType index) {
static const std::map<IndexType, std::string> s_index2str = {
{IndexType::cpu_idmap, INDEX_RAW},
{IndexType::gpu_ivfflat, INDEX_IVFFLAT}
};
const auto& iter = s_index2str.find(index);
if(iter == s_index2str.end()) {
throw Exception(StatusCode::InvalidAgument, "Invalid index type");
}
return iter->second;
}
IndexType ConvertUtil::Str2IndexType(const std::string& type) {
static const std::map<std::string, IndexType> s_str2index = {
{INDEX_RAW, IndexType::cpu_idmap},
{INDEX_IVFFLAT, IndexType::gpu_ivfflat}
};
const auto& iter = s_str2index.find(type);
if(iter == s_str2index.end()) {
throw Exception(StatusCode::InvalidAgument, "Invalid index type");
}
return iter->second;
}
}

View File

@ -1,18 +0,0 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include "MilvusApi.h"
namespace milvus {
class ConvertUtil {
public:
static std::string IndexType2Str(IndexType index);
static IndexType Str2IndexType(const std::string& type);
};
}

View File

@ -24,12 +24,51 @@ DBWrapper::DBWrapper() {
opt.index_trigger_size = (size_t)index_size * engine::ONE_MB;
}
CommonUtil::CreateDirectory(opt.meta.path);
ConfigNode& serverConfig = ServerConfig::GetInstance().GetConfig(CONFIG_SERVER);
std::string mode = serverConfig.GetValue(CONFIG_CLUSTER_MODE, "single");
if (mode == "single") {
opt.mode = zilliz::milvus::engine::Options::MODE::SINGLE;
}
else if (mode == "cluster") {
opt.mode = zilliz::milvus::engine::Options::MODE::CLUSTER;
}
else if (mode == "read_only") {
opt.mode = zilliz::milvus::engine::Options::MODE::READ_ONLY;
}
else {
std::cout << "ERROR: mode specified in server_config is not one of ['single', 'cluster', 'read_only']" << std::endl;
kill(0, SIGUSR1);
}
//set archive config
engine::ArchiveConf::CriteriaT criterial;
int64_t disk = config.GetInt64Value(CONFIG_DB_ARCHIVE_DISK, 0);
int64_t days = config.GetInt64Value(CONFIG_DB_ARCHIVE_DAYS, 0);
if(disk > 0) {
criterial[engine::ARCHIVE_CONF_DISK] = disk;
}
if(days > 0) {
criterial[engine::ARCHIVE_CONF_DAYS] = days;
}
opt.meta.archive_conf.SetCriterias(criterial);
//create db root folder
ServerError err = CommonUtil::CreateDirectory(opt.meta.path);
if(err != SERVER_SUCCESS) {
std::cout << "ERROR! Failed to create database root path: " << opt.meta.path << std::endl;
kill(0, SIGUSR1);
}
std::string msg = opt.meta.path;
try {
zilliz::milvus::engine::DB::Open(opt, &db_);
} catch(std::exception& ex) {
msg = ex.what();
}
zilliz::milvus::engine::DB::Open(opt, &db_);
if(db_ == nullptr) {
SERVER_LOG_ERROR << "Failed to open db";
throw ServerException(SERVER_NULL_POINTER, "Failed to open db");
std::cout << "ERROR! Failed to open database: " << msg << std::endl;
kill(0, SIGUSR1);
}
}

View File

@ -23,6 +23,7 @@
#include <thrift/concurrency/PosixThreadFactory.h>
#include <thread>
#include <iostream>
namespace zilliz {
namespace milvus {
@ -92,7 +93,8 @@ MilvusServer::StartService() {
return;
}
} catch (apache::thrift::TException& ex) {
//SERVER_LOG_ERROR << "Server encounter exception: " << ex.what();
std::cout << "ERROR! " << ex.what() << std::endl;
kill(0, SIGUSR1);
}
}

View File

@ -24,6 +24,15 @@ RequestHandler::CreateTable(const thrift::TableSchema &param) {
RequestScheduler::ExecTask(task_ptr);
}
bool
RequestHandler::HasTable(const std::string &table_name) {
bool has_table = false;
BaseTaskPtr task_ptr = HasTableTask::Create(table_name, has_table);
RequestScheduler::ExecTask(task_ptr);
return has_table;
}
void
RequestHandler::DeleteTable(const std::string &table_name) {
BaseTaskPtr task_ptr = DeleteTableTask::Create(table_name);

View File

@ -19,16 +19,28 @@ public:
RequestHandler();
/**
 * @brief Create table method
 *
 * This method is used to create a table.
 *
 * @param param, used to provide the table information to be created.
 */
void CreateTable(const ::milvus::thrift::TableSchema &param);
/**
* @brief Test table existence method
*
* This method is used to test table existence.
*
* @param table_name, name of the table to be tested.
*
* @return Indicate whether the table exists.
*/
bool HasTable(const std::string &table_name);
/**
* @brief Delete table method

View File

@ -18,35 +18,35 @@ using namespace ::milvus;
namespace {
const std::map<ServerError, thrift::ErrorCode::type> &ErrorMap() {
static const std::map<ServerError, thrift::ErrorCode::type> code_map = {
{SERVER_UNEXPECTED_ERROR, thrift::ErrorCode::ILLEGAL_ARGUMENT},
{SERVER_NULL_POINTER, thrift::ErrorCode::ILLEGAL_ARGUMENT},
{SERVER_UNEXPECTED_ERROR, thrift::ErrorCode::UNEXPECTED_ERROR},
{SERVER_UNSUPPORTED_ERROR, thrift::ErrorCode::UNEXPECTED_ERROR},
{SERVER_NULL_POINTER, thrift::ErrorCode::UNEXPECTED_ERROR},
{SERVER_INVALID_ARGUMENT, thrift::ErrorCode::ILLEGAL_ARGUMENT},
{SERVER_FILE_NOT_FOUND, thrift::ErrorCode::ILLEGAL_ARGUMENT},
{SERVER_NOT_IMPLEMENT, thrift::ErrorCode::ILLEGAL_ARGUMENT},
{SERVER_BLOCKING_QUEUE_EMPTY, thrift::ErrorCode::ILLEGAL_ARGUMENT},
{SERVER_FILE_NOT_FOUND, thrift::ErrorCode::FILE_NOT_FOUND},
{SERVER_NOT_IMPLEMENT, thrift::ErrorCode::UNEXPECTED_ERROR},
{SERVER_BLOCKING_QUEUE_EMPTY, thrift::ErrorCode::UNEXPECTED_ERROR},
{SERVER_CANNOT_CREATE_FOLDER, thrift::ErrorCode::CANNOT_CREATE_FOLDER},
{SERVER_CANNOT_CREATE_FILE, thrift::ErrorCode::CANNOT_CREATE_FILE},
{SERVER_CANNOT_DELETE_FOLDER, thrift::ErrorCode::CANNOT_DELETE_FOLDER},
{SERVER_CANNOT_DELETE_FILE, thrift::ErrorCode::CANNOT_DELETE_FILE},
{SERVER_TABLE_NOT_EXIST, thrift::ErrorCode::TABLE_NOT_EXISTS},
{SERVER_INVALID_TABLE_NAME, thrift::ErrorCode::ILLEGAL_TABLE_NAME},
{SERVER_INVALID_TABLE_DIMENSION, thrift::ErrorCode::ILLEGAL_DIMENSION},
{SERVER_INVALID_TIME_RANGE, thrift::ErrorCode::ILLEGAL_RANGE},
{SERVER_INVALID_VECTOR_DIMENSION, thrift::ErrorCode::ILLEGAL_DIMENSION},
{SERVER_INVALID_INDEX_TYPE, thrift::ErrorCode::ILLEGAL_INDEX_TYPE},
{SERVER_INVALID_ROWRECORD, thrift::ErrorCode::ILLEGAL_ROWRECORD},
{SERVER_INVALID_ROWRECORD_ARRAY, thrift::ErrorCode::ILLEGAL_ROWRECORD},
{SERVER_INVALID_TOPK, thrift::ErrorCode::ILLEGAL_TOPK},
{SERVER_ILLEGAL_VECTOR_ID, thrift::ErrorCode::ILLEGAL_VECTOR_ID},
{SERVER_ILLEGAL_SEARCH_RESULT, thrift::ErrorCode::ILLEGAL_SEARCH_RESULT},
{SERVER_CACHE_ERROR, thrift::ErrorCode::CACHE_FAILED},
{DB_META_TRANSACTION_FAILED, thrift::ErrorCode::META_FAILED},
};
return code_map;
}
const std::map<ServerError, std::string> &ErrorMessage() {
static const std::map<ServerError, std::string> msg_map = {
{SERVER_UNEXPECTED_ERROR, "unexpected error occurs"},
{SERVER_NULL_POINTER, "null pointer error"},
{SERVER_INVALID_ARGUMENT, "invalid argument"},
{SERVER_FILE_NOT_FOUND, "file not found"},
{SERVER_NOT_IMPLEMENT, "not implemented"},
{SERVER_BLOCKING_QUEUE_EMPTY, "queue empty"},
{SERVER_TABLE_NOT_EXIST, "table not exist"},
{SERVER_INVALID_TIME_RANGE, "invalid time range"},
{SERVER_INVALID_VECTOR_DIMENSION, "invalid vector dimension"},
};
return msg_map;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -69,6 +69,14 @@ ServerError BaseTask::Execute() {
return error_code_;
}
ServerError BaseTask::SetError(ServerError error_code, const std::string& error_msg) {
error_code_ = error_code;
error_msg_ = error_msg;
SERVER_LOG_ERROR << error_msg_;
return error_code_;
}
ServerError BaseTask::WaitToFinish() {
std::unique_lock <std::mutex> lock(finish_mtx_);
finish_cond_.wait(lock, [this] { return done_; });
@ -102,7 +110,7 @@ void RequestScheduler::ExecTask(BaseTaskPtr& task_ptr) {
ex.__set_code(ErrorMap().at(err));
std::string msg = task_ptr->ErrorMsg();
if(msg.empty()){
msg = ErrorMessage().at(err);
msg = "Error message not set";
}
ex.__set_reason(msg);
throw ex;

View File

@ -34,6 +34,8 @@ public:
protected:
virtual ServerError OnExecute() = 0;
ServerError SetError(ServerError error_code, const std::string& msg);
protected:
mutable std::mutex finish_mtx_;
std::condition_variable finish_cond_;

View File

@ -53,26 +53,27 @@ namespace {
return map_type[type];
}
ServerError
void
ConvertRowRecordToFloatArray(const std::vector<thrift::RowRecord>& record_array,
uint64_t dimension,
std::vector<float>& float_array) {
ServerError error_code;
std::vector<float>& float_array,
ServerError& error_code,
std::string& error_msg) {
uint64_t vec_count = record_array.size();
float_array.resize(vec_count*dimension);//allocate enough memory
for(uint64_t i = 0; i < vec_count; i++) {
const auto& record = record_array[i];
if(record.vector_data.empty()) {
error_code = SERVER_INVALID_ARGUMENT;
SERVER_LOG_ERROR << "No vector provided in record";
return error_code;
error_code = SERVER_INVALID_ROWRECORD;
error_msg = "Rowrecord float array is empty";
return;
}
uint64_t vec_dim = record.vector_data.size()/sizeof(double);//how many double values?
if(vec_dim != dimension) {
SERVER_LOG_ERROR << "Invalid vector dimension: " << vec_dim
<< " vs. group dimension:" << dimension;
error_code = SERVER_INVALID_VECTOR_DIMENSION;
return error_code;
error_msg = "Invalid rowrecord dimension: " + std::to_string(vec_dim)
+ " vs. table dimension:" + std::to_string(dimension);
return;
}
//convert double array to float array(thrift has no float type)
@ -81,30 +82,29 @@ namespace {
float_array[i*vec_dim + d] = (float)(d_p[d]);
}
}
return SERVER_SUCCESS;
}
static constexpr long DAY_SECONDS = 86400;
ServerError
void
ConvertTimeRangeToDBDates(const std::vector<thrift::Range> &range_array,
std::vector<DB_DATE>& dates) {
std::vector<DB_DATE>& dates,
ServerError& error_code,
std::string& error_msg) {
dates.clear();
ServerError error_code;
for(auto& range : range_array) {
time_t tt_start, tt_end;
tm tm_start, tm_end;
if(!CommonUtil::TimeStrToTime(range.start_value, tt_start, tm_start)){
error_code = SERVER_INVALID_TIME_RANGE;
SERVER_LOG_ERROR << "Invalid time range: " << range.start_value;
return error_code;
error_msg = "Invalid time range: " + range.start_value;
return;
}
if(!CommonUtil::TimeStrToTime(range.end_value, tt_end, tm_end)){
error_code = SERVER_INVALID_TIME_RANGE;
SERVER_LOG_ERROR << "Invalid time range: " << range.end_value;
return error_code;
error_msg = "Invalid time range: " + range.start_value;
return;
}
long days = (tt_end > tt_start) ? (tt_end - tt_start)/DAY_SECONDS : (tt_start - tt_end)/DAY_SECONDS;
@ -117,8 +117,6 @@ namespace {
dates.push_back(date);
}
}
return SERVER_SUCCESS;
}
}
@ -138,19 +136,16 @@ ServerError CreateTableTask::OnExecute() {
try {
//step 1: check arguments
if(schema_.table_name.empty() || schema_.dimension <= 0) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Invalid table name or dimension";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
if(schema_.table_name.empty()) {
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
if(schema_.dimension <= 0) {
return SetError(SERVER_INVALID_TABLE_DIMENSION, "Invalid table dimension: " + std::to_string(schema_.dimension));
}
engine::EngineType engine_type = EngineType(schema_.index_type);
if(engine_type == engine::EngineType::INVALID) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Invalid index type";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_INDEX_TYPE, "Invalid index type: " + std::to_string(schema_.index_type));
}
//step 2: construct table schema
@ -163,17 +158,11 @@ ServerError CreateTableTask::OnExecute() {
//step 3: create table
engine::Status stat = DBWrapper::DB()->CreateTable(table_info);
if(!stat.ok()) {//table could exist
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
rc.Record("done");
@ -199,10 +188,7 @@ ServerError DescribeTableTask::OnExecute() {
try {
//step 1: check arguments
if(table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
//step 2: get table info
@ -210,10 +196,7 @@ ServerError DescribeTableTask::OnExecute() {
table_info.table_id_ = table_name_;
engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
if(!stat.ok()) {
error_code_ = SERVER_TABLE_NOT_EXIST;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
schema_.table_name = table_info.table_id_;
@ -222,10 +205,7 @@ ServerError DescribeTableTask::OnExecute() {
schema_.store_raw_vector = table_info.store_raw_data_;
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return SERVER_UNEXPECTED_ERROR;
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
rc.Record("done");
@ -233,6 +213,41 @@ ServerError DescribeTableTask::OnExecute() {
return SERVER_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
HasTableTask::HasTableTask(const std::string& table_name, bool& has_table)
: BaseTask(DDL_DML_TASK_GROUP),
table_name_(table_name),
has_table_(has_table) {
}
BaseTaskPtr HasTableTask::Create(const std::string& table_name, bool& has_table) {
return std::shared_ptr<BaseTask>(new HasTableTask(table_name, has_table));
}
ServerError HasTableTask::OnExecute() {
try {
TimeRecorder rc("HasTableTask");
//step 1: check arguments
if(table_name_.empty()) {
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
//step 2: check table existence
engine::Status stat = DBWrapper::DB()->HasTable(table_name_, has_table_);
if(!stat.ok()) {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
rc.Elapse("totally cost");
} catch (std::exception& ex) {
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
return SERVER_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DeleteTableTask::DeleteTableTask(const std::string& table_name)
: BaseTask(DDL_DML_TASK_GROUP),
@ -240,8 +255,8 @@ DeleteTableTask::DeleteTableTask(const std::string& table_name)
}
BaseTaskPtr DeleteTableTask::Create(const std::string& group_id) {
return std::shared_ptr<BaseTask>(new DeleteTableTask(group_id));
BaseTaskPtr DeleteTableTask::Create(const std::string& table_name) {
return std::shared_ptr<BaseTask>(new DeleteTableTask(table_name));
}
ServerError DeleteTableTask::OnExecute() {
@ -250,10 +265,7 @@ ServerError DeleteTableTask::OnExecute() {
//step 1: check arguments
if (table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
//step 2: check table existence
@ -261,10 +273,11 @@ ServerError DeleteTableTask::OnExecute() {
table_info.table_id_ = table_name_;
engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
if(!stat.ok()) {
error_code_ = SERVER_TABLE_NOT_EXIST;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
if(stat.IsNotFound()) {
return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " does not exist");
} else {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
}
rc.Record("check validation");
@ -273,17 +286,13 @@ ServerError DeleteTableTask::OnExecute() {
std::vector<DB_DATE> dates;
stat = DBWrapper::DB()->DeleteTable(table_name_, dates);
if(!stat.ok()) {
SERVER_LOG_ERROR << "Engine failed: " << stat.ToString();
return SERVER_UNEXPECTED_ERROR;
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
rc.Record("deleta table");
rc.Elapse("totally cost");
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
return SERVER_SUCCESS;
@ -304,10 +313,7 @@ ServerError ShowTablesTask::OnExecute() {
std::vector<engine::meta::TableSchema> schema_array;
engine::Status stat = DBWrapper::DB()->AllTables(schema_array);
if(!stat.ok()) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
tables_.clear();
@ -341,17 +347,11 @@ ServerError AddVectorTask::OnExecute() {
//step 1: check arguments
if (table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
if(record_array_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Row record array is empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_ROWRECORD_ARRAY, "Row record array is empty");
}
//step 2: check table existence
@ -359,20 +359,22 @@ ServerError AddVectorTask::OnExecute() {
table_info.table_id_ = table_name_;
engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
if(!stat.ok()) {
error_code_ = SERVER_TABLE_NOT_EXIST;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
if(stat.IsNotFound()) {
return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " does not exist");
} else {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
}
rc.Record("check validation");
//step 3: prepare float data
std::vector<float> vec_f;
error_code_ = ConvertRowRecordToFloatArray(record_array_, table_info.dimension_, vec_f);
if(error_code_ != SERVER_SUCCESS) {
error_msg_ = "Invalid row record data";
return error_code_;
ServerError error_code = SERVER_SUCCESS;
std::string error_msg;
ConvertRowRecordToFloatArray(record_array_, table_info.dimension_, vec_f, error_code, error_msg);
if(error_code != SERVER_SUCCESS) {
return SetError(error_code, error_msg);
}
rc.Record("prepare vectors data");
@ -382,25 +384,20 @@ ServerError AddVectorTask::OnExecute() {
stat = DBWrapper::DB()->InsertVectors(table_name_, vec_count, vec_f.data(), record_ids_);
rc.Record("add vectors to engine");
if(!stat.ok()) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_CACHE_ERROR, "Cache error: " + stat.ToString());
}
if(record_ids_.size() != vec_count) {
SERVER_LOG_ERROR << "Vector ID not returned";
return SERVER_UNEXPECTED_ERROR;
std::string msg = "Add " + std::to_string(vec_count) + " vectors but only return "
+ std::to_string(record_ids_.size()) + " ids";
return SetError(SERVER_ILLEGAL_VECTOR_ID, msg);
}
rc.Record("do insert");
rc.Elapse("totally cost");
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
return SERVER_SUCCESS;
@ -439,17 +436,14 @@ ServerError SearchVectorTask::OnExecute() {
//step 1: check arguments
if (table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
if(top_k_ <= 0 || record_array_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Invalid topk value, or query record array is empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
if(top_k_ <= 0) {
return SetError(SERVER_INVALID_TOPK, "Invalid topk: " + std::to_string(top_k_));
}
if(record_array_.empty()) {
return SetError(SERVER_INVALID_ROWRECORD_ARRAY, "Row record array is empty");
}
//step 2: check table existence
@ -457,28 +451,29 @@ ServerError SearchVectorTask::OnExecute() {
table_info.table_id_ = table_name_;
engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
if(!stat.ok()) {
error_code_ = SERVER_TABLE_NOT_EXIST;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
if(stat.IsNotFound()) {
return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " does not exist");
} else {
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
}
//step 3: check date range, and convert to db dates
std::vector<DB_DATE> dates;
error_code_ = ConvertTimeRangeToDBDates(range_array_, dates);
if(error_code_ != SERVER_SUCCESS) {
error_msg_ = "Invalid query range";
return error_code_;
ServerError error_code = SERVER_SUCCESS;
std::string error_msg;
ConvertTimeRangeToDBDates(range_array_, dates, error_code, error_msg);
if(error_code != SERVER_SUCCESS) {
return SetError(error_code, error_msg);
}
rc.Record("check validation");
//step 3: prepare float data
std::vector<float> vec_f;
error_code_ = ConvertRowRecordToFloatArray(record_array_, table_info.dimension_, vec_f);
if(error_code_ != SERVER_SUCCESS) {
error_msg_ = "Invalid row record data";
return error_code_;
ConvertRowRecordToFloatArray(record_array_, table_info.dimension_, vec_f, error_code, error_msg);
if(error_code != SERVER_SUCCESS) {
return SetError(error_code, error_msg);
}
rc.Record("prepare vector data");
@ -495,13 +490,17 @@ ServerError SearchVectorTask::OnExecute() {
rc.Record("search vectors from engine");
if(!stat.ok()) {
SERVER_LOG_ERROR << "Engine failed: " << stat.ToString();
return SERVER_UNEXPECTED_ERROR;
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
if(results.empty()) {
return SERVER_SUCCESS; //empty table
}
if(results.size() != record_count) {
SERVER_LOG_ERROR << "Search result not returned";
return SERVER_UNEXPECTED_ERROR;
std::string msg = "Search " + std::to_string(record_count) + " vectors but only return "
+ std::to_string(results.size()) + " results";
return SetError(SERVER_ILLEGAL_SEARCH_RESULT, msg);
}
rc.Record("do search");
@ -515,7 +514,7 @@ ServerError SearchVectorTask::OnExecute() {
for(auto& pair : result) {
thrift::QueryResult thrift_result;
thrift_result.__set_id(pair.first);
thrift_result.__set_score(pair.second);
thrift_result.__set_distance(pair.second);
thrift_topk_result.query_result_arrays.emplace_back(thrift_result);
}
@ -526,10 +525,7 @@ ServerError SearchVectorTask::OnExecute() {
rc.Elapse("totally cost");
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
return SERVER_SUCCESS;
@ -553,20 +549,14 @@ ServerError GetTableRowCountTask::OnExecute() {
//step 1: check arguments
if (table_name_.empty()) {
error_code_ = SERVER_INVALID_ARGUMENT;
error_msg_ = "Table name cannot be empty";
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_INVALID_TABLE_NAME, "Empty table name");
}
//step 2: get row count
uint64_t row_count = 0;
engine::Status stat = DBWrapper::DB()->GetTableRowCount(table_name_, row_count);
if (!stat.ok()) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = "Engine failed: " + stat.ToString();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(DB_META_TRANSACTION_FAILED, "Engine failed: " + stat.ToString());
}
row_count_ = (int64_t) row_count;
@ -574,10 +564,7 @@ ServerError GetTableRowCountTask::OnExecute() {
rc.Elapse("totally cost");
} catch (std::exception& ex) {
error_code_ = SERVER_UNEXPECTED_ERROR;
error_msg_ = ex.what();
SERVER_LOG_ERROR << error_msg_;
return error_code_;
return SetError(SERVER_UNEXPECTED_ERROR, ex.what());
}
return SERVER_SUCCESS;

View File

@ -7,7 +7,6 @@
#include "RequestScheduler.h"
#include "utils/Error.h"
#include "utils/AttributeSerializer.h"
#include "db/Types.h"
#include "milvus_types.h"
@ -33,6 +32,22 @@ private:
const ::milvus::thrift::TableSchema& schema_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class HasTableTask : public BaseTask {
public:
static BaseTaskPtr Create(const std::string& table_name, bool& has_table);
protected:
HasTableTask(const std::string& table_name, bool& has_table);
ServerError OnExecute() override;
private:
std::string table_name_;
bool& has_table_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class DescribeTableTask : public BaseTask {
public:

View File

@ -19,11 +19,14 @@ static const std::string CONFIG_SERVER_ADDRESS = "address";
static const std::string CONFIG_SERVER_PORT = "port";
static const std::string CONFIG_SERVER_PROTOCOL = "transfer_protocol";
static const std::string CONFIG_SERVER_MODE = "server_mode";
static const std::string CONFIG_CLUSTER_MODE = "mode";
static const std::string CONFIG_DB = "db_config";
static const std::string CONFIG_DB_URL = "db_backend_url";
static const std::string CONFIG_DB_PATH = "db_path";
static const std::string CONFIG_DB_INDEX_TRIGGER_SIZE = "index_building_threshold";
static const std::string CONFIG_DB_ARCHIVE_DISK = "archive_disk_threshold";
static const std::string CONFIG_DB_ARCHIVE_DAYS = "archive_days_threshold";
static const std::string CONFIG_LOG = "log_config";

View File

@ -1,159 +1,159 @@
#include "S3ClientWrapper.h"
#include <aws/s3/model/CreateBucketRequest.h>
#include <aws/s3/model/DeleteBucketRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/DeleteObjectRequest.h>
#include <iostream>
#include <fstream>
namespace zilliz {
namespace milvus {
namespace engine {
namespace storage {
Status
S3ClientWrapper::Create(const std::string &ip_address,
const std::string &port,
const std::string &access_key,
const std::string &secret_key) {
Aws::InitAPI(options_);
Aws::Client::ClientConfiguration cfg;
// TODO: ip_address needs to be validated.
cfg.endpointOverride = ip_address + ":" + port; // S3 server ip address and port
cfg.scheme = Aws::Http::Scheme::HTTP;
cfg.verifySSL =
false; //Aws::Auth::AWSCredentials cred("RPW421T9GSIO4A45Y9ZR", "2owKYy9emSS90Q0pXuyqpX1OxBCyEDYodsiBemcq"); // authentication key
client_ =
new S3Client(Aws::Auth::AWSCredentials(access_key, secret_key),
cfg,
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Always,
false);
if (client_ == nullptr) {
std::string error = "Can't connect server.";
return Status::Error(error);
} else {
return Status::OK();
}
}
Status
S3ClientWrapper::Close() {
if (client_ != nullptr) {
delete client_;
client_ = nullptr;
}
Aws::ShutdownAPI(options_);
return Status::OK();
}
Status
S3ClientWrapper::CreateBucket(std::string& bucket_name) {
Aws::S3::Model::CreateBucketRequest request;
request.SetBucket(bucket_name);
auto outcome = client_->CreateBucket(request);
if (outcome.IsSuccess())
{
return Status::OK();
}
else
{
std::cout << "CreateBucket error: "
<< outcome.GetError().GetExceptionName() << std::endl
<< outcome.GetError().GetMessage() << std::endl;
switch(outcome.GetError().GetErrorType()) {
case Aws::S3::S3Errors::BUCKET_ALREADY_EXISTS:
case Aws::S3::S3Errors::BUCKET_ALREADY_OWNED_BY_YOU:
return Status::AlreadyExist(outcome.GetError().GetMessage());
default:
return Status::Error(outcome.GetError().GetMessage());
}
}
}
Status
S3ClientWrapper::DeleteBucket(std::string& bucket_name) {
Aws::S3::Model::DeleteBucketRequest bucket_request;
bucket_request.SetBucket(bucket_name);
auto outcome = client_->DeleteBucket(bucket_request);
if (outcome.IsSuccess())
{
return Status::OK();
}
else
{
std::cout << "DeleteBucket error: "
<< outcome.GetError().GetExceptionName() << " - "
<< outcome.GetError().GetMessage() << std::endl;
return Status::Error(outcome.GetError().GetMessage());
}
}
Status
S3ClientWrapper::UploadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) {
PutObjectRequest putObjectRequest;
putObjectRequest.WithBucket(BucketName.c_str()).WithKey(objectKey.c_str());
auto input_data = Aws::MakeShared<Aws::FStream>("PutObjectInputStream",
pathkey.c_str(),
std::ios_base::in | std::ios_base::binary);
putObjectRequest.SetBody(input_data);
auto put_object_result = client_->PutObject(putObjectRequest);
if (put_object_result.IsSuccess()) {
return Status::OK();
} else {
std::cout << "PutObject error: " << put_object_result.GetError().GetExceptionName() << " "
<< put_object_result.GetError().GetMessage() << std::endl;
return Status::Error(put_object_result.GetError().GetMessage());
}
}
Status
S3ClientWrapper::DownloadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) {
GetObjectRequest object_request;
object_request.WithBucket(BucketName.c_str()).WithKey(objectKey.c_str());
auto get_object_outcome = client_->GetObject(object_request);
if (get_object_outcome.IsSuccess()) {
Aws::OFStream local_file(pathkey.c_str(), std::ios::out | std::ios::binary);
local_file << get_object_outcome.GetResult().GetBody().rdbuf();
return Status::OK();
} else {
std::cout << "GetObject error: " << get_object_outcome.GetError().GetExceptionName() << " "
<< get_object_outcome.GetError().GetMessage() << std::endl;
return Status::Error(get_object_outcome.GetError().GetMessage());
}
}
Status
S3ClientWrapper::DeleteFile(std::string &bucket_name, std::string &object_key) {
Aws::S3::Model::DeleteObjectRequest object_request;
object_request.WithBucket(bucket_name).WithKey(object_key);
auto delete_object_outcome = client_->DeleteObject(object_request);
if (delete_object_outcome.IsSuccess()) {
return Status::OK();
} else {
std::cout << "DeleteObject error: " <<
delete_object_outcome.GetError().GetExceptionName() << " " <<
delete_object_outcome.GetError().GetMessage() << std::endl;
return Status::Error(delete_object_outcome.GetError().GetMessage());
}
}
}
}
}
}
//#include "S3ClientWrapper.h"
//
//#include <aws/s3/model/CreateBucketRequest.h>
//#include <aws/s3/model/DeleteBucketRequest.h>
//#include <aws/s3/model/PutObjectRequest.h>
//#include <aws/s3/model/GetObjectRequest.h>
//#include <aws/s3/model/DeleteObjectRequest.h>
//
//#include <iostream>
//#include <fstream>
//
//
//namespace zilliz {
//namespace milvus {
//namespace engine {
//namespace storage {
//
//Status
//S3ClientWrapper::Create(const std::string &ip_address,
// const std::string &port,
// const std::string &access_key,
// const std::string &secret_key) {
// Aws::InitAPI(options_);
// Aws::Client::ClientConfiguration cfg;
//
// // TODO: ip_address need to be validated.
//
// cfg.endpointOverride = ip_address + ":" + port; // S3 server ip address and port
// cfg.scheme = Aws::Http::Scheme::HTTP;
// cfg.verifySSL =
// false; //Aws::Auth::AWSCredentials cred("RPW421T9GSIO4A45Y9ZR", "2owKYy9emSS90Q0pXuyqpX1OxBCyEDYodsiBemcq"); // authentication key
// client_ =
// new S3Client(Aws::Auth::AWSCredentials(access_key, secret_key),
// cfg,
// Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Always,
// false);
// if (client_ == nullptr) {
// std::string error = "Can't connect server.";
// return Status::Error(error);
// } else {
// return Status::OK();
// }
//}
//
//
//Status
//S3ClientWrapper::Close() {
// if (client_ != nullptr) {
// delete client_;
// client_ = nullptr;
// }
// Aws::ShutdownAPI(options_);
// return Status::OK();
//}
//
//Status
//S3ClientWrapper::CreateBucket(std::string& bucket_name) {
// Aws::S3::Model::CreateBucketRequest request;
// request.SetBucket(bucket_name);
//
// auto outcome = client_->CreateBucket(request);
//
// if (outcome.IsSuccess())
// {
// return Status::OK();
// }
// else
// {
// std::cout << "CreateBucket error: "
// << outcome.GetError().GetExceptionName() << std::endl
// << outcome.GetError().GetMessage() << std::endl;
// switch(outcome.GetError().GetErrorType()) {
// case Aws::S3::S3Errors::BUCKET_ALREADY_EXISTS:
// case Aws::S3::S3Errors::BUCKET_ALREADY_OWNED_BY_YOU:
// return Status::AlreadyExist(outcome.GetError().GetMessage());
// default:
// return Status::Error(outcome.GetError().GetMessage());
// }
// }
//}
//
//Status
//S3ClientWrapper::DeleteBucket(std::string& bucket_name) {
// Aws::S3::Model::DeleteBucketRequest bucket_request;
// bucket_request.SetBucket(bucket_name);
//
// auto outcome = client_->DeleteBucket(bucket_request);
//
// if (outcome.IsSuccess())
// {
// return Status::OK();
// }
// else
// {
// std::cout << "DeleteBucket error: "
// << outcome.GetError().GetExceptionName() << " - "
// << outcome.GetError().GetMessage() << std::endl;
// return Status::Error(outcome.GetError().GetMessage());
// }
//}
//
//Status
//S3ClientWrapper::UploadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) {
//
// PutObjectRequest putObjectRequest;
// putObjectRequest.WithBucket(BucketName.c_str()).WithKey(objectKey.c_str());
//
// auto input_data = Aws::MakeShared<Aws::FStream>("PutObjectInputStream",
// pathkey.c_str(),
// std::ios_base::in | std::ios_base::binary);
// putObjectRequest.SetBody(input_data);
// auto put_object_result = client_->PutObject(putObjectRequest);
// if (put_object_result.IsSuccess()) {
// return Status::OK();
// } else {
// std::cout << "PutObject error: " << put_object_result.GetError().GetExceptionName() << " "
// << put_object_result.GetError().GetMessage() << std::endl;
// return Status::Error(put_object_result.GetError().GetMessage());
// }
//}
//
//Status
//S3ClientWrapper::DownloadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) {
// GetObjectRequest object_request;
// object_request.WithBucket(BucketName.c_str()).WithKey(objectKey.c_str());
// auto get_object_outcome = client_->GetObject(object_request);
// if (get_object_outcome.IsSuccess()) {
// Aws::OFStream local_file(pathkey.c_str(), std::ios::out | std::ios::binary);
// local_file << get_object_outcome.GetResult().GetBody().rdbuf();
// return Status::OK();
// } else {
// std::cout << "GetObject error: " << get_object_outcome.GetError().GetExceptionName() << " "
// << get_object_outcome.GetError().GetMessage() << std::endl;
// return Status::Error(get_object_outcome.GetError().GetMessage());
// }
//}
//
//Status
//S3ClientWrapper::DeleteFile(std::string &bucket_name, std::string &object_key) {
// Aws::S3::Model::DeleteObjectRequest object_request;
// object_request.WithBucket(bucket_name).WithKey(object_key);
//
// auto delete_object_outcome = client_->DeleteObject(object_request);
//
// if (delete_object_outcome.IsSuccess()) {
// return Status::OK();
// } else {
// std::cout << "DeleteObject error: " <<
// delete_object_outcome.GetError().GetExceptionName() << " " <<
// delete_object_outcome.GetError().GetMessage() << std::endl;
//
// return Status::Error(delete_object_outcome.GetError().GetMessage());
// }
//}
//
//}
//}
//}
//}

View File

@ -1,45 +1,45 @@
#pragma once
#include "storage/IStorage.h"
#include <aws/s3/S3Client.h>
#include <aws/core/Aws.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
using namespace Aws::S3;
using namespace Aws::S3::Model;
namespace zilliz {
namespace milvus {
namespace engine {
namespace storage {
class S3ClientWrapper : public IStorage {
public:
S3ClientWrapper() = default;
~S3ClientWrapper() = default;
Status Create(const std::string &ip_address,
const std::string &port,
const std::string &access_key,
const std::string &secret_key) override;
Status Close() override;
Status CreateBucket(std::string& bucket_name) override;
Status DeleteBucket(std::string& bucket_name) override;
Status UploadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) override;
Status DownloadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) override;
Status DeleteFile(std::string &bucket_name, std::string &object_key) override;
private:
S3Client *client_ = nullptr;
Aws::SDKOptions options_;
};
}
}
}
}
//#pragma once
//
//#include "storage/IStorage.h"
//
//
//#include <aws/s3/S3Client.h>
//#include <aws/core/Aws.h>
//#include <aws/core/auth/AWSCredentialsProvider.h>
//
//
//using namespace Aws::S3;
//using namespace Aws::S3::Model;
//
//namespace zilliz {
//namespace milvus {
//namespace engine {
//namespace storage {
//
//class S3ClientWrapper : public IStorage {
// public:
//
// S3ClientWrapper() = default;
// ~S3ClientWrapper() = default;
//
// Status Create(const std::string &ip_address,
// const std::string &port,
// const std::string &access_key,
// const std::string &secret_key) override;
// Status Close() override;
//
// Status CreateBucket(std::string& bucket_name) override;
// Status DeleteBucket(std::string& bucket_name) override;
// Status UploadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) override;
// Status DownloadFile(std::string &BucketName, std::string &objectKey, std::string &pathkey) override;
// Status DeleteFile(std::string &bucket_name, std::string &object_key) override;
//
// private:
// S3Client *client_ = nullptr;
// Aws::SDKOptions options_;
//};
//
//}
//}
//}
//}
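// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- NOT part of this commit. It shows the intended
// call sequence of the wrapper declared above. The endpoint, credentials, bucket
// and file paths are placeholders, and it assumes the returned Status exposes
// ok() the way engine::Status does elsewhere in the codebase.
// ---------------------------------------------------------------------------
inline void S3ClientWrapperUsageSketch() {
    zilliz::milvus::engine::storage::S3ClientWrapper storage_client;
    std::string ip = "127.0.0.1", port = "9000";              // e.g. a local MinIO endpoint
    std::string access_key = "ACCESS_KEY_PLACEHOLDER";
    std::string secret_key = "SECRET_KEY_PLACEHOLDER";
    if (!storage_client.Create(ip, port, access_key, secret_key).ok()) {
        return;                                               // endpoint unreachable
    }
    std::string bucket = "milvus-bucket";
    std::string object_key = "table1/raw_0";
    std::string local_path = "/tmp/raw_0";
    storage_client.CreateBucket(bucket);                      // AlreadyExist is acceptable here
    storage_client.UploadFile(bucket, object_key, local_path);
    storage_client.DownloadFile(bucket, object_key, local_path);
    storage_client.DeleteFile(bucket, object_key);
    storage_client.DeleteBucket(bucket);
    storage_client.Close();
}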

View File

@ -196,6 +196,213 @@ uint32_t MilvusService_CreateTable_presult::read(::apache::thrift::protocol::TPr
}
MilvusService_HasTable_args::~MilvusService_HasTable_args() throw() {
}
uint32_t MilvusService_HasTable_args::read(::apache::thrift::protocol::TProtocol* iprot) {
::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
std::string fname;
::apache::thrift::protocol::TType ftype;
int16_t fid;
xfer += iprot->readStructBegin(fname);
using ::apache::thrift::protocol::TProtocolException;
while (true)
{
xfer += iprot->readFieldBegin(fname, ftype, fid);
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
switch (fid)
{
case 2:
if (ftype == ::apache::thrift::protocol::T_STRING) {
xfer += iprot->readString(this->table_name);
this->__isset.table_name = true;
} else {
xfer += iprot->skip(ftype);
}
break;
default:
xfer += iprot->skip(ftype);
break;
}
xfer += iprot->readFieldEnd();
}
xfer += iprot->readStructEnd();
return xfer;
}
uint32_t MilvusService_HasTable_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
xfer += oprot->writeStructBegin("MilvusService_HasTable_args");
xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
xfer += oprot->writeString(this->table_name);
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
MilvusService_HasTable_pargs::~MilvusService_HasTable_pargs() throw() {
}
uint32_t MilvusService_HasTable_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
::apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
xfer += oprot->writeStructBegin("MilvusService_HasTable_pargs");
xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
xfer += oprot->writeString((*(this->table_name)));
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
MilvusService_HasTable_result::~MilvusService_HasTable_result() throw() {
}
uint32_t MilvusService_HasTable_result::read(::apache::thrift::protocol::TProtocol* iprot) {
::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
std::string fname;
::apache::thrift::protocol::TType ftype;
int16_t fid;
xfer += iprot->readStructBegin(fname);
using ::apache::thrift::protocol::TProtocolException;
while (true)
{
xfer += iprot->readFieldBegin(fname, ftype, fid);
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
switch (fid)
{
case 0:
if (ftype == ::apache::thrift::protocol::T_BOOL) {
xfer += iprot->readBool(this->success);
this->__isset.success = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 1:
if (ftype == ::apache::thrift::protocol::T_STRUCT) {
xfer += this->e.read(iprot);
this->__isset.e = true;
} else {
xfer += iprot->skip(ftype);
}
break;
default:
xfer += iprot->skip(ftype);
break;
}
xfer += iprot->readFieldEnd();
}
xfer += iprot->readStructEnd();
return xfer;
}
uint32_t MilvusService_HasTable_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
xfer += oprot->writeStructBegin("MilvusService_HasTable_result");
if (this->__isset.success) {
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0);
xfer += oprot->writeBool(this->success);
xfer += oprot->writeFieldEnd();
} else if (this->__isset.e) {
xfer += oprot->writeFieldBegin("e", ::apache::thrift::protocol::T_STRUCT, 1);
xfer += this->e.write(oprot);
xfer += oprot->writeFieldEnd();
}
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
MilvusService_HasTable_presult::~MilvusService_HasTable_presult() throw() {
}
uint32_t MilvusService_HasTable_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
::apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
uint32_t xfer = 0;
std::string fname;
::apache::thrift::protocol::TType ftype;
int16_t fid;
xfer += iprot->readStructBegin(fname);
using ::apache::thrift::protocol::TProtocolException;
while (true)
{
xfer += iprot->readFieldBegin(fname, ftype, fid);
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
switch (fid)
{
case 0:
if (ftype == ::apache::thrift::protocol::T_BOOL) {
xfer += iprot->readBool((*(this->success)));
this->__isset.success = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 1:
if (ftype == ::apache::thrift::protocol::T_STRUCT) {
xfer += this->e.read(iprot);
this->__isset.e = true;
} else {
xfer += iprot->skip(ftype);
}
break;
default:
xfer += iprot->skip(ftype);
break;
}
xfer += iprot->readFieldEnd();
}
xfer += iprot->readStructEnd();
return xfer;
}
MilvusService_DeleteTable_args::~MilvusService_DeleteTable_args() throw() {
}
@ -2290,6 +2497,67 @@ void MilvusServiceClient::recv_CreateTable()
return;
}
bool MilvusServiceClient::HasTable(const std::string& table_name)
{
send_HasTable(table_name);
return recv_HasTable();
}
void MilvusServiceClient::send_HasTable(const std::string& table_name)
{
int32_t cseqid = 0;
oprot_->writeMessageBegin("HasTable", ::apache::thrift::protocol::T_CALL, cseqid);
MilvusService_HasTable_pargs args;
args.table_name = &table_name;
args.write(oprot_);
oprot_->writeMessageEnd();
oprot_->getTransport()->writeEnd();
oprot_->getTransport()->flush();
}
bool MilvusServiceClient::recv_HasTable()
{
int32_t rseqid = 0;
std::string fname;
::apache::thrift::protocol::TMessageType mtype;
iprot_->readMessageBegin(fname, mtype, rseqid);
if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
::apache::thrift::TApplicationException x;
x.read(iprot_);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
throw x;
}
if (mtype != ::apache::thrift::protocol::T_REPLY) {
iprot_->skip(::apache::thrift::protocol::T_STRUCT);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
}
if (fname.compare("HasTable") != 0) {
iprot_->skip(::apache::thrift::protocol::T_STRUCT);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
}
bool _return;
MilvusService_HasTable_presult result;
result.success = &_return;
result.read(iprot_);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
if (result.__isset.success) {
return _return;
}
if (result.__isset.e) {
throw result.e;
}
throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "HasTable failed: unknown result");
}
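// Editor's illustrative sketch -- NOT part of the generated code. It shows how a
// caller could drive the new HasTable RPC through this client. The host and port
// below are placeholders, and it assumes the usual Thrift TSocket / TBufferedTransport /
// TBinaryProtocol stack (headers <thrift/transport/TSocket.h> and
// <thrift/transport/TBufferTransports.h> would need to be available).
static bool HasTableUsageSketch(const std::string& table_name) {
    namespace tft = ::apache::thrift::transport;
    namespace tfp = ::apache::thrift::protocol;
    using ::apache::thrift::stdcxx::shared_ptr;
    shared_ptr<tft::TSocket> socket(new tft::TSocket("127.0.0.1", 19530));
    shared_ptr<tft::TTransport> transport(new tft::TBufferedTransport(socket));
    shared_ptr<tfp::TProtocol> protocol(new tfp::TBinaryProtocol(transport));
    MilvusServiceClient client(protocol);
    transport->open();
    bool table_exists = client.HasTable(table_name);  // wraps send_HasTable/recv_HasTable above
    transport->close();
    return table_exists;
}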
void MilvusServiceClient::DeleteTable(const std::string& table_name)
{
send_DeleteTable(table_name);
@ -2855,6 +3123,63 @@ void MilvusServiceProcessor::process_CreateTable(int32_t seqid, ::apache::thrift
}
}
void MilvusServiceProcessor::process_HasTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
{
void* ctx = NULL;
if (this->eventHandler_.get() != NULL) {
ctx = this->eventHandler_->getContext("MilvusService.HasTable", callContext);
}
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "MilvusService.HasTable");
if (this->eventHandler_.get() != NULL) {
this->eventHandler_->preRead(ctx, "MilvusService.HasTable");
}
MilvusService_HasTable_args args;
args.read(iprot);
iprot->readMessageEnd();
uint32_t bytes = iprot->getTransport()->readEnd();
if (this->eventHandler_.get() != NULL) {
this->eventHandler_->postRead(ctx, "MilvusService.HasTable", bytes);
}
MilvusService_HasTable_result result;
try {
result.success = iface_->HasTable(args.table_name);
result.__isset.success = true;
} catch (Exception &e) {
result.e = e;
result.__isset.e = true;
} catch (const std::exception& e) {
if (this->eventHandler_.get() != NULL) {
this->eventHandler_->handlerError(ctx, "MilvusService.HasTable");
}
::apache::thrift::TApplicationException x(e.what());
oprot->writeMessageBegin("HasTable", ::apache::thrift::protocol::T_EXCEPTION, seqid);
x.write(oprot);
oprot->writeMessageEnd();
oprot->getTransport()->writeEnd();
oprot->getTransport()->flush();
return;
}
if (this->eventHandler_.get() != NULL) {
this->eventHandler_->preWrite(ctx, "MilvusService.HasTable");
}
oprot->writeMessageBegin("HasTable", ::apache::thrift::protocol::T_REPLY, seqid);
result.write(oprot);
oprot->writeMessageEnd();
bytes = oprot->getTransport()->writeEnd();
oprot->getTransport()->flush();
if (this->eventHandler_.get() != NULL) {
this->eventHandler_->postWrite(ctx, "MilvusService.HasTable", bytes);
}
}
void MilvusServiceProcessor::process_DeleteTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
{
void* ctx = NULL;
@ -3399,6 +3724,94 @@ void MilvusServiceConcurrentClient::recv_CreateTable(const int32_t seqid)
} // end while(true)
}
bool MilvusServiceConcurrentClient::HasTable(const std::string& table_name)
{
int32_t seqid = send_HasTable(table_name);
return recv_HasTable(seqid);
}
int32_t MilvusServiceConcurrentClient::send_HasTable(const std::string& table_name)
{
int32_t cseqid = this->sync_.generateSeqId();
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
oprot_->writeMessageBegin("HasTable", ::apache::thrift::protocol::T_CALL, cseqid);
MilvusService_HasTable_pargs args;
args.table_name = &table_name;
args.write(oprot_);
oprot_->writeMessageEnd();
oprot_->getTransport()->writeEnd();
oprot_->getTransport()->flush();
sentry.commit();
return cseqid;
}
bool MilvusServiceConcurrentClient::recv_HasTable(const int32_t seqid)
{
int32_t rseqid = 0;
std::string fname;
::apache::thrift::protocol::TMessageType mtype;
// the read mutex gets dropped and reacquired as part of waitForWork()
// The destructor of this sentry wakes up other clients
::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
while(true) {
if(!this->sync_.getPending(fname, mtype, rseqid)) {
iprot_->readMessageBegin(fname, mtype, rseqid);
}
if(seqid == rseqid) {
if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
::apache::thrift::TApplicationException x;
x.read(iprot_);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
sentry.commit();
throw x;
}
if (mtype != ::apache::thrift::protocol::T_REPLY) {
iprot_->skip(::apache::thrift::protocol::T_STRUCT);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
}
if (fname.compare("HasTable") != 0) {
iprot_->skip(::apache::thrift::protocol::T_STRUCT);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
// in a bad state, don't commit
using ::apache::thrift::protocol::TProtocolException;
throw TProtocolException(TProtocolException::INVALID_DATA);
}
bool _return;
MilvusService_HasTable_presult result;
result.success = &_return;
result.read(iprot_);
iprot_->readMessageEnd();
iprot_->getTransport()->readEnd();
if (result.__isset.success) {
sentry.commit();
return _return;
}
if (result.__isset.e) {
sentry.commit();
throw result.e;
}
// in a bad state, don't commit
throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "HasTable failed: unknown result");
}
// seqid != rseqid
this->sync_.updatePending(fname, mtype, rseqid);
// this will temporarily unlock the readMutex, and let other clients get work done
this->sync_.waitForWork(seqid);
} // end while(true)
}
void MilvusServiceConcurrentClient::DeleteTable(const std::string& table_name)
{
int32_t seqid = send_DeleteTable(table_name);

View File

@ -34,6 +34,18 @@ class MilvusServiceIf {
*/
virtual void CreateTable(const TableSchema& param) = 0;
/**
* @brief Test table existence method
*
* This method is used to test table existence.
*
* @param table_name, the name of the table to be tested.
*
*
* @param table_name
*/
virtual bool HasTable(const std::string& table_name) = 0;
/**
* @brief Delete table method
*
@ -178,6 +190,10 @@ class MilvusServiceNull : virtual public MilvusServiceIf {
void CreateTable(const TableSchema& /* param */) {
return;
}
bool HasTable(const std::string& /* table_name */) {
bool _return = false;
return _return;
}
void DeleteTable(const std::string& /* table_name */) {
return;
}
@ -309,6 +325,118 @@ class MilvusService_CreateTable_presult {
};
typedef struct _MilvusService_HasTable_args__isset {
_MilvusService_HasTable_args__isset() : table_name(false) {}
bool table_name :1;
} _MilvusService_HasTable_args__isset;
class MilvusService_HasTable_args {
public:
MilvusService_HasTable_args(const MilvusService_HasTable_args&);
MilvusService_HasTable_args& operator=(const MilvusService_HasTable_args&);
MilvusService_HasTable_args() : table_name() {
}
virtual ~MilvusService_HasTable_args() throw();
std::string table_name;
_MilvusService_HasTable_args__isset __isset;
void __set_table_name(const std::string& val);
bool operator == (const MilvusService_HasTable_args & rhs) const
{
if (!(table_name == rhs.table_name))
return false;
return true;
}
bool operator != (const MilvusService_HasTable_args &rhs) const {
return !(*this == rhs);
}
bool operator < (const MilvusService_HasTable_args & ) const;
uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
};
class MilvusService_HasTable_pargs {
public:
virtual ~MilvusService_HasTable_pargs() throw();
const std::string* table_name;
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
};
typedef struct _MilvusService_HasTable_result__isset {
_MilvusService_HasTable_result__isset() : success(false), e(false) {}
bool success :1;
bool e :1;
} _MilvusService_HasTable_result__isset;
class MilvusService_HasTable_result {
public:
MilvusService_HasTable_result(const MilvusService_HasTable_result&);
MilvusService_HasTable_result& operator=(const MilvusService_HasTable_result&);
MilvusService_HasTable_result() : success(0) {
}
virtual ~MilvusService_HasTable_result() throw();
bool success;
Exception e;
_MilvusService_HasTable_result__isset __isset;
void __set_success(const bool val);
void __set_e(const Exception& val);
bool operator == (const MilvusService_HasTable_result & rhs) const
{
if (!(success == rhs.success))
return false;
if (!(e == rhs.e))
return false;
return true;
}
bool operator != (const MilvusService_HasTable_result &rhs) const {
return !(*this == rhs);
}
bool operator < (const MilvusService_HasTable_result & ) const;
uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
};
typedef struct _MilvusService_HasTable_presult__isset {
_MilvusService_HasTable_presult__isset() : success(false), e(false) {}
bool success :1;
bool e :1;
} _MilvusService_HasTable_presult__isset;
class MilvusService_HasTable_presult {
public:
virtual ~MilvusService_HasTable_presult() throw();
bool* success;
Exception e;
_MilvusService_HasTable_presult__isset __isset;
uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
};
typedef struct _MilvusService_DeleteTable_args__isset {
_MilvusService_DeleteTable_args__isset() : table_name(false) {}
bool table_name :1;
@ -1269,6 +1397,9 @@ class MilvusServiceClient : virtual public MilvusServiceIf {
void CreateTable(const TableSchema& param);
void send_CreateTable(const TableSchema& param);
void recv_CreateTable();
bool HasTable(const std::string& table_name);
void send_HasTable(const std::string& table_name);
bool recv_HasTable();
void DeleteTable(const std::string& table_name);
void send_DeleteTable(const std::string& table_name);
void recv_DeleteTable();
@ -1309,6 +1440,7 @@ class MilvusServiceProcessor : public ::apache::thrift::TDispatchProcessor {
typedef std::map<std::string, ProcessFunction> ProcessMap;
ProcessMap processMap_;
void process_CreateTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_HasTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_DeleteTable(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_AddVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
void process_SearchVector(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@ -1321,6 +1453,7 @@ class MilvusServiceProcessor : public ::apache::thrift::TDispatchProcessor {
MilvusServiceProcessor(::apache::thrift::stdcxx::shared_ptr<MilvusServiceIf> iface) :
iface_(iface) {
processMap_["CreateTable"] = &MilvusServiceProcessor::process_CreateTable;
processMap_["HasTable"] = &MilvusServiceProcessor::process_HasTable;
processMap_["DeleteTable"] = &MilvusServiceProcessor::process_DeleteTable;
processMap_["AddVector"] = &MilvusServiceProcessor::process_AddVector;
processMap_["SearchVector"] = &MilvusServiceProcessor::process_SearchVector;
@ -1366,6 +1499,15 @@ class MilvusServiceMultiface : virtual public MilvusServiceIf {
ifaces_[i]->CreateTable(param);
}
bool HasTable(const std::string& table_name) {
size_t sz = ifaces_.size();
size_t i = 0;
for (; i < (sz - 1); ++i) {
ifaces_[i]->HasTable(table_name);
}
return ifaces_[i]->HasTable(table_name);
}
void DeleteTable(const std::string& table_name) {
size_t sz = ifaces_.size();
size_t i = 0;
@ -1477,6 +1619,9 @@ class MilvusServiceConcurrentClient : virtual public MilvusServiceIf {
void CreateTable(const TableSchema& param);
int32_t send_CreateTable(const TableSchema& param);
void recv_CreateTable(const int32_t seqid);
bool HasTable(const std::string& table_name);
int32_t send_HasTable(const std::string& table_name);
bool recv_HasTable(const int32_t seqid);
void DeleteTable(const std::string& table_name);
int32_t send_DeleteTable(const std::string& table_name);
void recv_DeleteTable(const int32_t seqid);

View File

@ -35,6 +35,21 @@ class MilvusServiceHandler : virtual public MilvusServiceIf {
printf("CreateTable\n");
}
/**
* @brief Test table existence method
*
* This method is used to test table existence.
*
* @param table_name, the name of the table to be tested.
*
*
* @param table_name
*/
bool HasTable(const std::string& table_name) {
// Your implementation goes here
printf("HasTable\n");
}
/**
* @brief Delete table method
*

View File

@ -15,23 +15,51 @@ namespace milvus { namespace thrift {
int _kErrorCodeValues[] = {
ErrorCode::SUCCESS,
ErrorCode::UNEXPECTED_ERROR,
ErrorCode::CONNECT_FAILED,
ErrorCode::PERMISSION_DENIED,
ErrorCode::TABLE_NOT_EXISTS,
ErrorCode::ILLEGAL_ARGUMENT,
ErrorCode::ILLEGAL_RANGE,
ErrorCode::ILLEGAL_DIMENSION
ErrorCode::ILLEGAL_DIMENSION,
ErrorCode::ILLEGAL_INDEX_TYPE,
ErrorCode::ILLEGAL_TABLE_NAME,
ErrorCode::ILLEGAL_TOPK,
ErrorCode::ILLEGAL_ROWRECORD,
ErrorCode::ILLEGAL_VECTOR_ID,
ErrorCode::ILLEGAL_SEARCH_RESULT,
ErrorCode::FILE_NOT_FOUND,
ErrorCode::META_FAILED,
ErrorCode::CACHE_FAILED,
ErrorCode::CANNOT_CREATE_FOLDER,
ErrorCode::CANNOT_CREATE_FILE,
ErrorCode::CANNOT_DELETE_FOLDER,
ErrorCode::CANNOT_DELETE_FILE
};
const char* _kErrorCodeNames[] = {
"SUCCESS",
"UNEXPECTED_ERROR",
"CONNECT_FAILED",
"PERMISSION_DENIED",
"TABLE_NOT_EXISTS",
"ILLEGAL_ARGUMENT",
"ILLEGAL_RANGE",
"ILLEGAL_DIMENSION"
"ILLEGAL_DIMENSION",
"ILLEGAL_INDEX_TYPE",
"ILLEGAL_TABLE_NAME",
"ILLEGAL_TOPK",
"ILLEGAL_ROWRECORD",
"ILLEGAL_VECTOR_ID",
"ILLEGAL_SEARCH_RESULT",
"FILE_NOT_FOUND",
"META_FAILED",
"CACHE_FAILED",
"CANNOT_CREATE_FOLDER",
"CANNOT_CREATE_FILE",
"CANNOT_DELETE_FOLDER",
"CANNOT_DELETE_FILE"
};
const std::map<int, const char*> _ErrorCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(7, _kErrorCodeValues, _kErrorCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
const std::map<int, const char*> _ErrorCode_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(21, _kErrorCodeValues, _kErrorCodeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
std::ostream& operator<<(std::ostream& out, const ErrorCode::type& val) {
std::map<int, const char*>::const_iterator it = _ErrorCode_VALUES_TO_NAMES.find(val);
@ -536,8 +564,8 @@ void QueryResult::__set_id(const int64_t val) {
this->id = val;
}
void QueryResult::__set_score(const double val) {
this->score = val;
void QueryResult::__set_distance(const double val) {
this->distance = val;
}
std::ostream& operator<<(std::ostream& out, const QueryResult& obj)
{
@ -577,8 +605,8 @@ uint32_t QueryResult::read(::apache::thrift::protocol::TProtocol* iprot) {
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
xfer += iprot->readDouble(this->score);
this->__isset.score = true;
xfer += iprot->readDouble(this->distance);
this->__isset.distance = true;
} else {
xfer += iprot->skip(ftype);
}
@ -604,8 +632,8 @@ uint32_t QueryResult::write(::apache::thrift::protocol::TProtocol* oprot) const
xfer += oprot->writeI64(this->id);
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldBegin("score", ::apache::thrift::protocol::T_DOUBLE, 2);
xfer += oprot->writeDouble(this->score);
xfer += oprot->writeFieldBegin("distance", ::apache::thrift::protocol::T_DOUBLE, 2);
xfer += oprot->writeDouble(this->distance);
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldStop();
@ -616,18 +644,18 @@ uint32_t QueryResult::write(::apache::thrift::protocol::TProtocol* oprot) const
void swap(QueryResult &a, QueryResult &b) {
using ::std::swap;
swap(a.id, b.id);
swap(a.score, b.score);
swap(a.distance, b.distance);
swap(a.__isset, b.__isset);
}
QueryResult::QueryResult(const QueryResult& other9) {
id = other9.id;
score = other9.score;
distance = other9.distance;
__isset = other9.__isset;
}
QueryResult& QueryResult::operator=(const QueryResult& other10) {
id = other10.id;
score = other10.score;
distance = other10.distance;
__isset = other10.__isset;
return *this;
}
@ -635,7 +663,7 @@ void QueryResult::printTo(std::ostream& out) const {
using ::apache::thrift::to_string;
out << "QueryResult(";
out << "id=" << to_string(id);
out << ", " << "score=" << to_string(score);
out << ", " << "distance=" << to_string(distance);
out << ")";
}

View File

@ -23,12 +23,26 @@ namespace milvus { namespace thrift {
struct ErrorCode {
enum type {
SUCCESS = 0,
CONNECT_FAILED = 1,
PERMISSION_DENIED = 2,
TABLE_NOT_EXISTS = 3,
ILLEGAL_ARGUMENT = 4,
ILLEGAL_RANGE = 5,
ILLEGAL_DIMENSION = 6
UNEXPECTED_ERROR = 1,
CONNECT_FAILED = 2,
PERMISSION_DENIED = 3,
TABLE_NOT_EXISTS = 4,
ILLEGAL_ARGUMENT = 5,
ILLEGAL_RANGE = 6,
ILLEGAL_DIMENSION = 7,
ILLEGAL_INDEX_TYPE = 8,
ILLEGAL_TABLE_NAME = 9,
ILLEGAL_TOPK = 10,
ILLEGAL_ROWRECORD = 11,
ILLEGAL_VECTOR_ID = 12,
ILLEGAL_SEARCH_RESULT = 13,
FILE_NOT_FOUND = 14,
META_FAILED = 15,
CACHE_FAILED = 16,
CANNOT_CREATE_FOLDER = 17,
CANNOT_CREATE_FILE = 18,
CANNOT_DELETE_FOLDER = 19,
CANNOT_DELETE_FILE = 20
};
};
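// Editor's note (not generated output): inserting UNEXPECTED_ERROR at value 1 shifts
// CONNECT_FAILED through ILLEGAL_DIMENSION up by one relative to the previous enum,
// so client stubs built from the old .thrift definition need to be regenerated to
// interpret these codes correctly on the wire.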
@ -242,9 +256,9 @@ void swap(RowRecord &a, RowRecord &b);
std::ostream& operator<<(std::ostream& out, const RowRecord& obj);
typedef struct _QueryResult__isset {
_QueryResult__isset() : id(false), score(false) {}
_QueryResult__isset() : id(false), distance(false) {}
bool id :1;
bool score :1;
bool distance :1;
} _QueryResult__isset;
class QueryResult : public virtual ::apache::thrift::TBase {
@ -252,24 +266,24 @@ class QueryResult : public virtual ::apache::thrift::TBase {
QueryResult(const QueryResult&);
QueryResult& operator=(const QueryResult&);
QueryResult() : id(0), score(0) {
QueryResult() : id(0), distance(0) {
}
virtual ~QueryResult() throw();
int64_t id;
double score;
double distance;
_QueryResult__isset __isset;
void __set_id(const int64_t val);
void __set_score(const double val);
void __set_distance(const double val);
bool operator == (const QueryResult & rhs) const
{
if (!(id == rhs.id))
return false;
if (!(score == rhs.score))
if (!(distance == rhs.distance))
return false;
return true;
}

View File

@ -15,12 +15,26 @@ namespace netcore milvus.thrift
enum ErrorCode {
SUCCESS = 0,
UNEXPECTED_ERROR,
CONNECT_FAILED,
PERMISSION_DENIED,
TABLE_NOT_EXISTS,
ILLEGAL_ARGUMENT,
ILLEGAL_RANGE,
ILLEGAL_DIMENSION,
ILLEGAL_INDEX_TYPE,
ILLEGAL_TABLE_NAME,
ILLEGAL_TOPK,
ILLEGAL_ROWRECORD,
ILLEGAL_VECTOR_ID,
ILLEGAL_SEARCH_RESULT,
FILE_NOT_FOUND,
META_FAILED,
CACHE_FAILED,
CANNOT_CREATE_FOLDER,
CANNOT_CREATE_FILE,
CANNOT_DELETE_FOLDER,
CANNOT_DELETE_FILE,
}
exception Exception {
@ -59,7 +73,7 @@ struct RowRecord {
*/
struct QueryResult {
1: i64 id; ///< Output result
2: double score; ///< Vector similarity score: 0 ~ 100
2: double distance; ///< Vector similarity distance
}
/**
@ -80,6 +94,16 @@ service MilvusService {
*/
void CreateTable(2: TableSchema param) throws(1: Exception e);
/**
* @brief Test table existence method
*
* This method is used to test table existence.
*
* @param table_name, the name of the table to be tested.
*
*/
bool HasTable(2: string table_name) throws(1: Exception e);
/**
* @brief Delete table method

View File

@ -1,50 +0,0 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#include "AttributeSerializer.h"
#include "StringHelpFunctions.h"
namespace zilliz {
namespace milvus {
namespace server {
ServerError AttributeSerializer::Encode(const AttribMap& attrib_map, std::string& attrib_str) {
attrib_str = "";
for(auto iter : attrib_map) {
attrib_str += iter.first;
attrib_str += ":\"";
attrib_str += iter.second;
attrib_str += "\";";
}
return SERVER_SUCCESS;
}
ServerError AttributeSerializer::Decode(const std::string& attrib_str, AttribMap& attrib_map) {
attrib_map.clear();
std::vector<std::string> kv_pairs;
StringHelpFunctions::SplitStringByQuote(attrib_str, ";", "\"", kv_pairs);
for(std::string& str : kv_pairs) {
std::string key, val;
size_t index = str.find_first_of(":", 0);
if (index != std::string::npos) {
key = str.substr(0, index);
val = str.substr(index + 1);
} else {
key = str;
}
attrib_map.insert(std::make_pair(key, val));
}
return SERVER_SUCCESS;
}
}
}
}

View File

@ -1,27 +0,0 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#pragma once
#include <map>
#include "Error.h"
namespace zilliz {
namespace milvus {
namespace server {
using AttribMap = std::map<std::string, std::string>;
class AttributeSerializer {
public:
static ServerError Encode(const AttribMap& attrib_map, std::string& attrib_str);
static ServerError Decode(const std::string& attrib_str, AttribMap& attrib_map);
};
}
}
}

View File

@ -51,7 +51,7 @@ bool CommonUtil::GetSystemAvailableThreads(unsigned int &threadCnt) {
return true;
}
bool CommonUtil::IsDirectoryExit(const std::string &path)
bool CommonUtil::IsDirectoryExist(const std::string &path)
{
DIR *dp = nullptr;
if ((dp = opendir(path.c_str())) == nullptr) {
@ -182,7 +182,7 @@ void CommonUtil::ConvertTime(time_t time_integer, tm &time_struct) {
memcpy(&time_struct, t_m, sizeof(tm));
}
void ConvertTime(tm time_struct, time_t &time_integer) {
void CommonUtil::ConvertTime(tm time_struct, time_t &time_integer) {
time_integer = mktime(&time_struct);
}

View File

@ -20,7 +20,7 @@ class CommonUtil {
static bool GetSystemAvailableThreads(unsigned int &threadCnt);
static bool IsFileExist(const std::string &path);
static bool IsDirectoryExit(const std::string &path);
static bool IsDirectoryExist(const std::string &path);
static ServerError CreateDirectory(const std::string &path);
static ServerError DeleteDirectory(const std::string &path);

View File

@ -24,18 +24,35 @@ ToGlobalServerErrorCode(const ServerError error_code) {
return SERVER_ERROR_CODE_BASE + error_code;
}
constexpr ServerError SERVER_UNEXPECTED_ERROR = ToGlobalServerErrorCode(0x001);
constexpr ServerError SERVER_UNSUPPORTED_ERROR = ToGlobalServerErrorCode(0x002);
constexpr ServerError SERVER_NULL_POINTER = ToGlobalServerErrorCode(0x003);
constexpr ServerError SERVER_INVALID_ARGUMENT = ToGlobalServerErrorCode(0x004);
constexpr ServerError SERVER_FILE_NOT_FOUND = ToGlobalServerErrorCode(0x005);
constexpr ServerError SERVER_NOT_IMPLEMENT = ToGlobalServerErrorCode(0x006);
constexpr ServerError SERVER_BLOCKING_QUEUE_EMPTY = ToGlobalServerErrorCode(0x007);
constexpr ServerError SERVER_TABLE_NOT_EXIST = ToGlobalServerErrorCode(0x008);
constexpr ServerError SERVER_INVALID_TIME_RANGE = ToGlobalServerErrorCode(0x009);
constexpr ServerError SERVER_INVALID_VECTOR_DIMENSION = ToGlobalServerErrorCode(0x00a);
constexpr ServerError SERVER_LICENSE_VALIDATION_FAIL = ToGlobalServerErrorCode(0x00b);
constexpr ServerError SERVER_LICENSE_FILE_NOT_EXIST = ToGlobalServerErrorCode(0x00c);
constexpr ServerError SERVER_UNEXPECTED_ERROR = ToGlobalServerErrorCode(1);
constexpr ServerError SERVER_UNSUPPORTED_ERROR = ToGlobalServerErrorCode(2);
constexpr ServerError SERVER_NULL_POINTER = ToGlobalServerErrorCode(3);
constexpr ServerError SERVER_INVALID_ARGUMENT = ToGlobalServerErrorCode(4);
constexpr ServerError SERVER_FILE_NOT_FOUND = ToGlobalServerErrorCode(5);
constexpr ServerError SERVER_NOT_IMPLEMENT = ToGlobalServerErrorCode(6);
constexpr ServerError SERVER_BLOCKING_QUEUE_EMPTY = ToGlobalServerErrorCode(7);
constexpr ServerError SERVER_CANNOT_CREATE_FOLDER = ToGlobalServerErrorCode(8);
constexpr ServerError SERVER_CANNOT_CREATE_FILE = ToGlobalServerErrorCode(9);
constexpr ServerError SERVER_CANNOT_DELETE_FOLDER = ToGlobalServerErrorCode(10);
constexpr ServerError SERVER_CANNOT_DELETE_FILE = ToGlobalServerErrorCode(11);
constexpr ServerError SERVER_TABLE_NOT_EXIST = ToGlobalServerErrorCode(100);
constexpr ServerError SERVER_INVALID_TABLE_NAME = ToGlobalServerErrorCode(101);
constexpr ServerError SERVER_INVALID_TABLE_DIMENSION = ToGlobalServerErrorCode(102);
constexpr ServerError SERVER_INVALID_TIME_RANGE = ToGlobalServerErrorCode(103);
constexpr ServerError SERVER_INVALID_VECTOR_DIMENSION = ToGlobalServerErrorCode(104);
constexpr ServerError SERVER_INVALID_INDEX_TYPE = ToGlobalServerErrorCode(105);
constexpr ServerError SERVER_INVALID_ROWRECORD = ToGlobalServerErrorCode(106);
constexpr ServerError SERVER_INVALID_ROWRECORD_ARRAY = ToGlobalServerErrorCode(107);
constexpr ServerError SERVER_INVALID_TOPK = ToGlobalServerErrorCode(108);
constexpr ServerError SERVER_ILLEGAL_VECTOR_ID = ToGlobalServerErrorCode(109);
constexpr ServerError SERVER_ILLEGAL_SEARCH_RESULT = ToGlobalServerErrorCode(110);
constexpr ServerError SERVER_CACHE_ERROR = ToGlobalServerErrorCode(111);
constexpr ServerError SERVER_LICENSE_FILE_NOT_EXIST = ToGlobalServerErrorCode(500);
constexpr ServerError SERVER_LICENSE_VALIDATION_FAIL = ToGlobalServerErrorCode(501);
constexpr ServerError DB_META_TRANSACTION_FAILED = ToGlobalServerErrorCode(1000);
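// Editor's note (illustrative, not part of this header): the codes are now grouped by
// range -- 1-11 generic server errors, 100-111 request/validation errors, 500-501
// license errors, 1000 metadata transaction failure -- so call sites can report a
// precise reason, e.g.
//     ServerError err = CommonUtil::CreateDirectory(db_path);
//     if (err == SERVER_CANNOT_CREATE_FOLDER) { /* report folder creation failure */ }
// (CommonUtil::CreateDirectory and db_path here are only an example.)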
class ServerException : public std::exception {
public:

View File

@ -9,13 +9,6 @@ namespace zilliz {
namespace milvus {
namespace server {
void StringHelpFunctions::TrimStringLineBreak(std::string &string) {
if (!string.empty()) {
static std::string s_format("\n\r");
string.erase(string.find_last_not_of(s_format) + 1);
}
}
void StringHelpFunctions::TrimStringBlank(std::string &string) {
if (!string.empty()) {
static std::string s_format(" \n\r\t");

View File

@ -18,8 +18,6 @@ private:
StringHelpFunctions() = default;
public:
static void TrimStringLineBreak(std::string &string);
static void TrimStringBlank(std::string &string);
static void TrimStringQuote(std::string &string, const std::string &qoute);

View File

@ -13,6 +13,7 @@
#include "Index.h"
#include "faiss/index_io.h"
#include "faiss/IndexIVF.h"
namespace zilliz {
namespace milvus {
@ -55,6 +56,9 @@ bool Index::add_with_ids(idx_t n, const float *xdata, const long *xids) {
bool Index::search(idx_t n, const float *data, idx_t k, float *distances, long *labels) const {
try {
if(auto ivf_index = std::dynamic_pointer_cast<faiss::IndexIVF>(index_)) {
ivf_index->nprobe = 100;
}
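// Editor's note (illustrative): nprobe is the number of inverted lists an IVF index
// scans per query; pinning it to 100 here trades extra query latency for higher recall.
// In raw FAISS terms the equivalent would be:
//     faiss::IndexIVFFlat ivf(&quantizer, d, /*nlist=*/1638);
//     ivf.nprobe = 100;   // scan 100 of the nlist inverted lists for each query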
index_->search(n, data, k, distances, labels);
}
catch (std::exception &e) {

View File

@ -39,7 +39,7 @@ string Operand::get_index_type(const int &nb) {
}
case IVF: {
index_str += (ncent != 0 ? index_type + std::to_string(ncent) :
index_type + std::to_string(int(nb / 1000000.0 * 16384)));
index_type + std::to_string(int(nb / 1000000.0 * 1638)));
break;
}
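// Editor's note (illustrative): with the factor reduced from 16384 to 1638, a table of
// 1,000,000 vectors with no explicit ncent now gets nlist = 1638 inverted lists, and
// 250,000 vectors gets int(250000 / 1000000.0 * 1638) = 409.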
case IDMAP: {

View File

@ -7,6 +7,7 @@ GTEST_VERSION=1.8.1
JSONCONS_VERSION=0.126.0
LAPACK_VERSION=v3.8.0
LZ4_VERSION=v1.9.1
MYSQLPP_VERSION=3.2.4
OPENBLAS_VERSION=v0.3.6
PROMETHEUS_VERSION=v0.7.0
ROCKSDB_VERSION=v6.0.2

View File

@ -5,15 +5,11 @@
#-------------------------------------------------------------------------------
link_directories(
"${CMAKE_BINARY_DIR}/lib"
"${GTEST_PREFIX}/lib/"
)
aux_source_directory(${MILVUS_ENGINE_SRC}/db db_srcs)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
message(STATUS "GTEST LIB: ${GTEST_PREFIX}/lib")
set(unittest_srcs
${CMAKE_CURRENT_SOURCE_DIR}/main.cpp)
#${EASYLOGGINGPP_INCLUDE_DIR}/easylogging++.cc)
@ -48,5 +44,7 @@ add_subdirectory(db)
#add_subdirectory(faiss_wrapper)
add_subdirectory(index_wrapper)
add_subdirectory(license)
add_subdirectory(faiss_wrapper)
#add_subdirectory(license)
add_subdirectory(metrics)
add_subdirectory(storage)

View File

@ -7,6 +7,7 @@ aux_source_directory(${MILVUS_ENGINE_SRC}/db db_srcs)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_srcs)
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper wrapper_src)
aux_source_directory(./ test_srcs)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/scheduler scheduler_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/scheduler/context scheduler_context_files)
@ -20,19 +21,17 @@ set(db_scheduler_srcs
include_directories(/usr/local/cuda/include)
link_directories("/usr/local/cuda/lib64")
include_directories(/usr/include/mysql)
set(db_test_src
${unittest_srcs}
#${unittest_srcs}
${config_files}
${cache_srcs}
${db_srcs}
${db_scheduler_srcs}
${wrapper_src}
${require_files}
utils.cpp
db_tests.cpp
meta_tests.cpp)
${test_srcs})
cuda_add_executable(db_test ${db_test_src})
@ -45,8 +44,9 @@ set(db_libs
boost_system
boost_filesystem
lz4
mysqlpp
)
target_link_libraries(db_test ${db_libs} ${unittest_libs})
install(TARGETS db_test DESTINATION bin)
install(TARGETS db_test DESTINATION bin)

View File

@ -12,31 +12,34 @@
#include "db/DB.h"
#include "db/DBImpl.h"
#include "db/MetaConsts.h"
#include "db/Factories.h"
using namespace zilliz::milvus;
namespace {
static const std::string TABLE_NAME = "test_group";
static constexpr int64_t TABLE_DIM = 256;
static const std::string TABLE_NAME = "test_group";
static constexpr int64_t TABLE_DIM = 256;
static constexpr int64_t VECTOR_COUNT = 250000;
static constexpr int64_t INSERT_LOOP = 100000;
engine::meta::TableSchema BuildTableSchema() {
engine::meta::TableSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.table_id_ = TABLE_NAME;
table_info.engine_type_ = (int)engine::EngineType::FAISS_IDMAP;
return table_info;
}
void BuildVectors(int64_t n, std::vector<float>& vectors) {
vectors.clear();
vectors.resize(n*TABLE_DIM);
float* data = vectors.data();
for(int i = 0; i < n; i++) {
for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48();
data[TABLE_DIM * i] += i / 2000.;
engine::meta::TableSchema BuildTableSchema() {
engine::meta::TableSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.table_id_ = TABLE_NAME;
table_info.engine_type_ = (int)engine::EngineType::FAISS_IDMAP;
return table_info;
}
void BuildVectors(int64_t n, std::vector<float>& vectors) {
vectors.clear();
vectors.resize(n*TABLE_DIM);
float* data = vectors.data();
for(int i = 0; i < n; i++) {
for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48();
data[TABLE_DIM * i] += i / 2000.;
}
}
}
}
@ -88,20 +91,14 @@ TEST_F(DBTest, CONFIG_TEST) {
TEST_F(DBTest, DB_TEST) {
static const std::string table_name = "test_group";
static const int table_dim = 256;
engine::meta::TableSchema table_info;
table_info.dimension_ = table_dim;
table_info.table_id_ = table_name;
table_info.engine_type_ = (int)engine::EngineType::FAISS_IDMAP;
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = table_name;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, table_dim);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
engine::IDNumbers vector_ids;
engine::IDNumbers target_ids;
@ -130,7 +127,7 @@ TEST_F(DBTest, DB_TEST) {
prev_count = count;
START_TIMER;
stat = db_->Query(table_name, k, qb, qxb.data(), results);
stat = db_->Query(TABLE_NAME, k, qb, qxb.data(), results);
ss << "Search " << j << " With Size " << count/engine::meta::M << " M";
STOP_TIMER(ss.str());
@ -149,14 +146,14 @@ TEST_F(DBTest, DB_TEST) {
}
});
int loop = 100000;
int loop = INSERT_LOOP;
for (auto i=0; i<loop; ++i) {
if (i==40) {
db_->InsertVectors(table_name, qb, qxb.data(), target_ids);
db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
ASSERT_EQ(target_ids.size(), qb);
} else {
db_->InsertVectors(table_name, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
}
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
@ -175,7 +172,7 @@ TEST_F(DBTest, SEARCH_TEST) {
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
// prepare raw data
size_t nb = 250000;
size_t nb = VECTOR_COUNT;
size_t nq = 10;
size_t k = 5;
std::vector<float> xb(nb*TABLE_DIM);
@ -223,6 +220,18 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
std::vector<engine::meta::TableSchema> table_schema_array;
stat = db_->AllTables(table_schema_array);
ASSERT_STATS(stat);
bool bfound = false;
for(auto& schema : table_schema_array) {
if(schema.table_id_ == TABLE_NAME) {
bfound = true;
break;
}
}
ASSERT_TRUE(bfound);
engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
@ -239,7 +248,7 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
std::vector<float> xb;
BuildVectors(nb, xb);
int loop = 100000;
int loop = INSERT_LOOP;
for (auto i=0; i<loop; ++i) {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
@ -270,7 +279,7 @@ TEST_F(DBTest2, DELETE_TEST) {
uint64_t size;
db_->Size(size);
int64_t nb = 100000;
int64_t nb = INSERT_LOOP;
std::vector<float> xb;
BuildVectors(nb, xb);

View File

@ -17,43 +17,53 @@
using namespace zilliz::milvus::engine;
TEST_F(MetaTest, GROUP_TEST) {
auto table_id = "meta_test_group";
TEST_F(MetaTest, TABLE_TEST) {
auto table_id = "meta_test_table";
meta::TableSchema group;
group.table_id_ = table_id;
auto status = impl_->CreateTable(group);
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl_->CreateTable(table);
ASSERT_TRUE(status.ok());
auto gid = group.id_;
group.id_ = -1;
status = impl_->DescribeTable(group);
auto gid = table.id_;
table.id_ = -1;
status = impl_->DescribeTable(table);
ASSERT_TRUE(status.ok());
ASSERT_EQ(group.id_, gid);
ASSERT_EQ(group.table_id_, table_id);
ASSERT_EQ(table.id_, gid);
ASSERT_EQ(table.table_id_, table_id);
group.table_id_ = "not_found";
status = impl_->DescribeTable(group);
table.table_id_ = "not_found";
status = impl_->DescribeTable(table);
ASSERT_TRUE(!status.ok());
group.table_id_ = table_id;
status = impl_->CreateTable(group);
ASSERT_TRUE(!status.ok());
table.table_id_ = table_id;
status = impl_->CreateTable(table);
ASSERT_TRUE(status.ok());
table.table_id_ = "";
status = impl_->CreateTable(table);
ASSERT_TRUE(status.ok());
}
TEST_F(MetaTest, table_file_TEST) {
auto table_id = "meta_test_group";
TEST_F(MetaTest, TABLE_FILE_TEST) {
auto table_id = "meta_test_table";
meta::TableSchema group;
group.table_id_ = table_id;
auto status = impl_->CreateTable(group);
meta::TableSchema table;
table.table_id_ = table_id;
table.dimension_ = 256;
auto status = impl_->CreateTable(table);
meta::TableFileSchema table_file;
table_file.table_id_ = group.table_id_;
table_file.table_id_ = table.table_id_;
status = impl_->CreateTableFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, meta::TableFileSchema::NEW);
uint64_t cnt = 0;
status = impl_->Count(table_id, cnt);
ASSERT_TRUE(status.ok());
ASSERT_EQ(cnt, 0UL);
auto file_id = table_file.file_id_;
auto new_file_type = meta::TableFileSchema::INDEX;
@ -104,15 +114,15 @@ TEST_F(MetaTest, ARCHIVE_TEST_DAYS) {
options.archive_conf = ArchiveConf("delete", ss.str());
auto impl = meta::DBMetaImpl(options);
auto table_id = "meta_test_group";
auto table_id = "meta_test_table";
meta::TableSchema group;
group.table_id_ = table_id;
auto status = impl.CreateTable(group);
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
meta::TableFilesSchema files;
meta::TableFileSchema table_file;
table_file.table_id_ = group.table_id_;
table_file.table_id_ = table.table_id_;
auto cnt = 100;
long ts = utils::GetMicroSecTimeStamp();
@ -156,13 +166,13 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) {
auto impl = meta::DBMetaImpl(options);
auto table_id = "meta_test_group";
meta::TableSchema group;
group.table_id_ = table_id;
auto status = impl.CreateTable(group);
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
meta::TableFilesSchema files;
meta::TableFileSchema table_file;
table_file.table_id_ = group.table_id_;
table_file.table_id_ = table.table_id_;
auto cnt = 10;
auto each_size = 2UL;
@ -198,9 +208,9 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) {
TEST_F(MetaTest, TABLE_FILES_TEST) {
auto table_id = "meta_test_group";
meta::TableSchema group;
group.table_id_ = table_id;
auto status = impl_->CreateTable(group);
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl_->CreateTable(table);
int new_files_cnt = 4;
int raw_files_cnt = 5;
@ -208,7 +218,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
int index_files_cnt = 7;
meta::TableFileSchema table_file;
table_file.table_id_ = group.table_id_;
table_file.table_id_ = table.table_id_;
for (auto i=0; i<new_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
@ -241,7 +251,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
ASSERT_EQ(files.size(), to_index_files_cnt);
meta::DatePartionedTableFilesSchema dated_files;
status = impl_->FilesToMerge(group.table_id_, dated_files);
status = impl_->FilesToMerge(table.table_id_, dated_files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(dated_files[table_file.date_].size(), raw_files_cnt);
@ -254,4 +264,9 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
ASSERT_TRUE(status.ok());
ASSERT_EQ(dated_files[table_file.date_].size(),
to_index_files_cnt+raw_files_cnt+index_files_cnt);
status = impl_->FilesToSearch(table_id, meta::DatesT(), dated_files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(dated_files[table_file.date_].size(),
to_index_files_cnt+raw_files_cnt+index_files_cnt);
}

View File

@ -0,0 +1,137 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <thread>
#include <easylogging++.h>
#include <boost/filesystem.hpp>
#include "db/FaissExecutionEngine.h"
#include "db/Exception.h"
#include "db/Status.h"
#include "db/Options.h"
#include "db/DBMetaImpl.h"
#include "db/EngineFactory.h"
#include <vector>
using namespace zilliz::milvus;
namespace {
void CopyStatus(engine::Status& st1, engine::Status& st2) {
st1 = st2;
}
}
TEST(DBMiscTest, ENGINE_API_TEST) {
//engine api AddWithIdArray
const uint16_t dim = 512;
const long n = 10;
engine::FaissExecutionEngine engine(dim, "/tmp/1", "IDMap", "IDMap,Flat");
std::vector<float> vectors;
std::vector<long> ids;
for (long i = 0; i < n; i++) {
for (uint16_t k = 0; k < dim; k++) {
vectors.push_back((float) k);
}
ids.push_back(i);
}
auto status = engine.AddWithIdArray(vectors, ids);
ASSERT_TRUE(status.ok());
auto engine_ptr = engine::EngineFactory::Build(128, "/tmp", engine::EngineType::INVALID);
ASSERT_EQ(engine_ptr, nullptr);
engine_ptr = engine::EngineFactory::Build(128, "/tmp", engine::EngineType::FAISS_IVFFLAT);
ASSERT_NE(engine_ptr, nullptr);
engine_ptr = engine::EngineFactory::Build(128, "/tmp", engine::EngineType::FAISS_IDMAP);
ASSERT_NE(engine_ptr, nullptr);
}
TEST(DBMiscTest, EXCEPTION_TEST) {
engine::Exception ex1("");
std::string what = ex1.what();
ASSERT_FALSE(what.empty());
engine::OutOfRangeException ex2;
what = ex2.what();
ASSERT_FALSE(what.empty());
}
TEST(DBMiscTest, STATUS_TEST) {
engine::Status status = engine::Status::OK();
std::string str = status.ToString();
ASSERT_FALSE(str.empty());
status = engine::Status::Error("wrong", "mistake");
ASSERT_TRUE(status.IsError());
str = status.ToString();
ASSERT_FALSE(str.empty());
status = engine::Status::NotFound("wrong", "mistake");
ASSERT_TRUE(status.IsNotFound());
str = status.ToString();
ASSERT_FALSE(str.empty());
status = engine::Status::DBTransactionError("wrong", "mistake");
ASSERT_TRUE(status.IsDBTransactionError());
str = status.ToString();
ASSERT_FALSE(str.empty());
engine::Status status_copy = engine::Status::OK();
CopyStatus(status_copy, status);
ASSERT_TRUE(status.IsDBTransactionError());
}
TEST(DBMiscTest, OPTIONS_TEST) {
try {
engine::ArchiveConf archive("$$##");
} catch (std::exception& ex) {
ASSERT_TRUE(true);
}
{
engine::ArchiveConf archive("delete", "no");
ASSERT_TRUE(archive.GetCriterias().empty());
}
{
engine::ArchiveConf archive("delete", "1:2");
ASSERT_TRUE(archive.GetCriterias().empty());
}
{
engine::ArchiveConf archive("delete", "1:2:3");
ASSERT_TRUE(archive.GetCriterias().empty());
}
{
engine::ArchiveConf archive("delete");
engine::ArchiveConf::CriteriaT criterial = {
{"disk", 1024},
{"days", 100}
};
archive.SetCriterias(criterial);
auto crit = archive.GetCriterias();
ASSERT_EQ(criterial["disk"], 1024);
ASSERT_EQ(criterial["days"], 100);
}
}
TEST(DBMiscTest, META_TEST) {
engine::DBMetaOptions options;
options.path = "/tmp/milvus_test";
engine::meta::DBMetaImpl impl(options);
time_t tt;
time( &tt );
int delta = 10;
engine::meta::DateT dt = impl.GetDate(tt, delta);
ASSERT_GT(dt, 0);
}

View File

@ -0,0 +1,305 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <thread>
#include <easylogging++.h>
#include <boost/filesystem.hpp>
#include "utils.h"
#include "db/DB.h"
#include "db/DBImpl.h"
#include "db/MetaConsts.h"
#include "db/Factories.h"
using namespace zilliz::milvus;
namespace {
static const std::string TABLE_NAME = "test_group";
static constexpr int64_t TABLE_DIM = 256;
static constexpr int64_t VECTOR_COUNT = 250000;
static constexpr int64_t INSERT_LOOP = 100000;
engine::meta::TableSchema BuildTableSchema() {
engine::meta::TableSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.table_id_ = TABLE_NAME;
table_info.engine_type_ = (int)engine::EngineType::FAISS_IDMAP;
return table_info;
}
void BuildVectors(int64_t n, std::vector<float>& vectors) {
vectors.clear();
vectors.resize(n*TABLE_DIM);
float* data = vectors.data();
for(int i = 0; i < n; i++) {
for(int j = 0; j < TABLE_DIM; j++) data[TABLE_DIM * i + j] = drand48();
data[TABLE_DIM * i] += i / 2000.;
}
}
}
TEST_F(MySQLDBTest, DB_TEST) {
auto options = GetOptions();
auto db_ = engine::DBFactory::Build(options);
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
engine::IDNumbers vector_ids;
engine::IDNumbers target_ids;
int64_t nb = 50;
std::vector<float> xb;
BuildVectors(nb, xb);
int64_t qb = 5;
std::vector<float> qxb;
BuildVectors(qb, qxb);
db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
ASSERT_EQ(target_ids.size(), qb);
std::thread search([&]() {
engine::QueryResults results;
int k = 10;
std::this_thread::sleep_for(std::chrono::seconds(5));
INIT_TIMER;
std::stringstream ss;
uint64_t count = 0;
uint64_t prev_count = 0;
for (auto j=0; j<10; ++j) {
ss.str("");
db_->Size(count);
prev_count = count;
START_TIMER;
stat = db_->Query(TABLE_NAME, k, qb, qxb.data(), results);
ss << "Search " << j << " With Size " << count/engine::meta::M << " M";
STOP_TIMER(ss.str());
ASSERT_STATS(stat);
for (auto k=0; k<qb; ++k) {
// std::cout << results[k][0].first << " " << target_ids[k] << std::endl;
// ASSERT_EQ(results[k][0].first, target_ids[k]);
bool exists = false;
for (auto& result : results[k]) {
if (result.first == target_ids[k]) {
exists = true;
}
}
ASSERT_TRUE(exists);
ss.str("");
ss << "Result [" << k << "]:";
for (auto result : results[k]) {
ss << result.first << " ";
}
/* LOG(DEBUG) << ss.str(); */
}
ASSERT_TRUE(count >= prev_count);
std::this_thread::sleep_for(std::chrono::seconds(3));
}
});
int loop = INSERT_LOOP;
for (auto i=0; i<loop; ++i) {
// if (i==10) {
// db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
// ASSERT_EQ(target_ids.size(), qb);
// } else {
// db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
// }
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
search.join();
delete db_;
auto dummyDB = engine::DBFactory::Build(options);
dummyDB->DropAll();
delete dummyDB;
};
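// SEARCH_TEST: bulk-insert VECTOR_COUNT random vectors in batches, wait for the
// background index build to finish, then run a top-k query over nq random query vectors.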
TEST_F(MySQLDBTest, SEARCH_TEST) {
auto options = GetOptions();
auto db_ = engine::DBFactory::Build(options);
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
// prepare raw data
size_t nb = VECTOR_COUNT;
size_t nq = 10;
size_t k = 5;
std::vector<float> xb(nb*TABLE_DIM);
std::vector<float> xq(nq*TABLE_DIM);
std::vector<long> ids(nb);
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis_xt(-1.0, 1.0);
for (size_t i = 0; i < nb*TABLE_DIM; i++) {
xb[i] = dis_xt(gen);
if (i < nb){
ids[i] = i;
}
}
for (size_t i = 0; i < nq*TABLE_DIM; i++) {
xq[i] = dis_xt(gen);
}
// result data
//std::vector<long> nns_gt(k*nq);
std::vector<long> nns(k*nq); // nns = nearest neighbor search results
//std::vector<float> dis_gt(k*nq);
std::vector<float> dis(k*nq);
// insert data
const int batch_size = 100;
for (int j = 0; j < nb / batch_size; ++j) {
stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data()+batch_size*j*TABLE_DIM, ids);
if (j == 200){ sleep(1);}
ASSERT_STATS(stat);
}
sleep(2); // wait until build index finish
engine::QueryResults results;
stat = db_->Query(TABLE_NAME, k, nq, xq.data(), results);
ASSERT_STATS(stat);
delete db_;
auto dummyDB = engine::DBFactory::Build(options);
dummyDB->DropAll();
delete dummyDB;
// TODO(linxj): add groundTruth assert
};
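// ARCHIVE_DISK_CHECK: with the archive criterion set to "disk:1" (1 GB), keep
// inserting vectors and verify the reported database size stays within the limit.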
TEST_F(MySQLDBTest, ARCHIVE_DISK_CHECK) {
auto options = GetOptions();
options.meta.archive_conf = engine::ArchiveConf("delete", "disk:1");
auto db_ = engine::DBFactory::Build(options);
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
std::vector<engine::meta::TableSchema> table_schema_array;
stat = db_->AllTables(table_schema_array);
ASSERT_STATS(stat);
bool bfound = false;
for(auto& schema : table_schema_array) {
if(schema.table_id_ == TABLE_NAME) {
bfound = true;
break;
}
}
ASSERT_TRUE(bfound);
engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
engine::IDNumbers vector_ids;
engine::IDNumbers target_ids;
uint64_t size;
db_->Size(size);
int64_t nb = 10;
std::vector<float> xb;
BuildVectors(nb, xb);
int loop = INSERT_LOOP;
for (auto i=0; i<loop; ++i) {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
std::this_thread::sleep_for(std::chrono::seconds(1));
db_->Size(size);
LOG(DEBUG) << "size=" << size;
ASSERT_LE(size, 1 * engine::meta::G);
delete db_;
auto dummyDB = engine::DBFactory::Build(options);
dummyDB->DropAll();
delete dummyDB;
};
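// DELETE_TEST: create a table, insert data, then delete the table and give the
// background threads a few seconds to clean up before shutting the DB down.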
TEST_F(MySQLDBTest, DELETE_TEST) {
auto options = GetOptions();
options.meta.archive_conf = engine::ArchiveConf("delete", "disk:1");
auto db_ = engine::DBFactory::Build(options);
engine::meta::TableSchema table_info = BuildTableSchema();
engine::Status stat = db_->CreateTable(table_info);
// std::cout << stat.ToString() << std::endl;
engine::meta::TableSchema table_info_get;
table_info_get.table_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
ASSERT_STATS(stat);
// std::cout << "location: " << table_info_get.location_ << std::endl;
ASSERT_TRUE(boost::filesystem::exists(table_info_get.location_));
engine::IDNumbers vector_ids;
uint64_t size;
db_->Size(size);
int64_t nb = INSERT_LOOP;
std::vector<float> xb;
BuildVectors(nb, xb);
int loop = 20;
for (auto i=0; i<loop; ++i) {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
std::vector<engine::meta::DateT> dates;
stat = db_->DeleteTable(TABLE_NAME, dates);
// std::cout << "5 sec start" << std::endl;
std::this_thread::sleep_for(std::chrono::seconds(5));
// std::cout << "5 sec finish" << std::endl;
ASSERT_TRUE(stat.ok());
// ASSERT_FALSE(boost::filesystem::exists(table_info_get.location_));
delete db_;
auto dummyDB = engine::DBFactory::Build(options);
dummyDB->DropAll();
delete dummyDB;
};

View File

@ -0,0 +1,333 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <thread>
#include <easylogging++.h>
#include <stdlib.h>
#include <time.h>
#include "utils.h"
#include "db/MySQLMetaImpl.h"
#include "db/Factories.h"
#include "db/Utils.h"
#include "db/MetaConsts.h"
#include "mysql++/mysql++.h"
#include <iostream>
using namespace zilliz::milvus::engine;
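// These tests exercise the MySQL-backed meta implementation; they need a backend
// URI supplied on the command line (see main() in utils.cpp), otherwise
// getDBMetaOptions() throws and the test aborts with a failure.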
TEST_F(MySQLTest, TABLE_TEST) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
} catch(std::exception& ex) {
ASSERT_TRUE(false);
return;
}
int mode = Options::MODE::SINGLE;
meta::MySQLMetaImpl impl(options, mode);
auto table_id = "meta_test_table";
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
ASSERT_TRUE(status.ok());
auto gid = table.id_;
table.id_ = -1;
status = impl.DescribeTable(table);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table.id_, gid);
ASSERT_EQ(table.table_id_, table_id);
table.table_id_ = "not_found";
status = impl.DescribeTable(table);
ASSERT_TRUE(!status.ok());
table.table_id_ = table_id;
status = impl.CreateTable(table);
ASSERT_TRUE(status.ok());
table.table_id_ = "";
status = impl.CreateTable(table);
ASSERT_TRUE(status.ok());
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}
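// TABLE_FILE_TEST: walk a table file through its life cycle: created as NEW,
// updated to INDEX, then marked TO_DELETE once its date partition is dropped.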
TEST_F(MySQLTest, TABLE_FILE_TEST) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
} catch(std::exception& ex) {
ASSERT_TRUE(false);
return;
}
int mode = Options::MODE::SINGLE;
meta::MySQLMetaImpl impl(options, mode);
auto table_id = "meta_test_table";
meta::TableSchema table;
table.table_id_ = table_id;
table.dimension_ = 256;
auto status = impl.CreateTable(table);
meta::TableFileSchema table_file;
table_file.table_id_ = table.table_id_;
status = impl.CreateTableFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, meta::TableFileSchema::NEW);
uint64_t cnt = 0;
status = impl.Count(table_id, cnt);
ASSERT_TRUE(status.ok());
ASSERT_EQ(cnt, 0UL);
auto file_id = table_file.file_id_;
auto new_file_type = meta::TableFileSchema::INDEX;
table_file.file_type_ = new_file_type;
status = impl.UpdateTableFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, new_file_type);
meta::DatesT dates;
dates.push_back(meta::Meta::GetDate());
status = impl.DropPartitionsByDates(table_file.table_id_, dates);
ASSERT_FALSE(status.ok());
dates.clear();
for (auto i=2; i < 10; ++i) {
dates.push_back(meta::Meta::GetDateWithDelta(-1*i));
}
status = impl.DropPartitionsByDates(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
table_file.date_ = meta::Meta::GetDateWithDelta(-2);
status = impl.UpdateTableFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.date_, meta::Meta::GetDateWithDelta(-2));
ASSERT_FALSE(table_file.file_type_ == meta::TableFileSchema::TO_DELETE);
dates.clear();
dates.push_back(table_file.date_);
status = impl.DropPartitionsByDates(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
std::vector<size_t> ids = {table_file.id_};
meta::TableFilesSchema files;
status = impl.GetTableFiles(table_file.table_id_, ids, files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(files.size(), 1UL);
ASSERT_TRUE(files[0].file_type_ == meta::TableFileSchema::TO_DELETE);
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}
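// ARCHIVE_TEST_DAYS: give each file a random age and verify that Archive() marks
// only the files older than the configured "days" criterion as TO_DELETE.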
TEST_F(MySQLTest, ARCHIVE_TEST_DAYS) {
srand(time(0));
DBMetaOptions options;
try {
options = getDBMetaOptions();
} catch(std::exception& ex) {
ASSERT_TRUE(false);
return;
}
int days_num = rand() % 100;
std::stringstream ss;
ss << "days:" << days_num;
options.archive_conf = ArchiveConf("delete", ss.str());
int mode = Options::MODE::SINGLE;
meta::MySQLMetaImpl impl(options, mode);
auto table_id = "meta_test_table";
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
meta::TableFilesSchema files;
meta::TableFileSchema table_file;
table_file.table_id_ = table.table_id_;
auto cnt = 100;
long ts = utils::GetMicroSecTimeStamp();
std::vector<int> days;
std::vector<size_t> ids;
for (auto i=0; i<cnt; ++i) {
status = impl.CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::NEW;
int day = rand() % (days_num*2);
table_file.created_on_ = ts - day*meta::D_SEC*meta::US_PS - 10000;
status = impl.UpdateTableFile(table_file);
files.push_back(table_file);
days.push_back(day);
ids.push_back(table_file.id_);
}
impl.Archive();
int i = 0;
meta::TableFilesSchema files_get;
status = impl.GetTableFiles(table_file.table_id_, ids, files_get);
ASSERT_TRUE(status.ok());
for(auto& file : files_get) {
if (days[i] < days_num) {
ASSERT_EQ(file.file_type_, meta::TableFileSchema::NEW);
} else {
ASSERT_EQ(file.file_type_, meta::TableFileSchema::TO_DELETE);
}
i++;
}
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}
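// ARCHIVE_TEST_DISK: ten 2 GB files against a "disk:11" (11 GB) limit; Archive()
// is expected to mark the first five files TO_DELETE so the remaining usage fits.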
TEST_F(MySQLTest, ARCHIVE_TEST_DISK) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
} catch(std::exception& ex) {
ASSERT_TRUE(false);
return;
}
options.archive_conf = ArchiveConf("delete", "disk:11");
int mode = Options::MODE::SINGLE;
auto impl = meta::MySQLMetaImpl(options, mode);
auto table_id = "meta_test_group";
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
meta::TableFilesSchema files;
meta::TableFileSchema table_file;
table_file.table_id_ = table.table_id_;
auto cnt = 10;
auto each_size = 2UL;
std::vector<size_t> ids;
for (auto i=0; i<cnt; ++i) {
status = impl.CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::NEW;
table_file.size_ = each_size * meta::G;
status = impl.UpdateTableFile(table_file);
files.push_back(table_file);
ids.push_back(table_file.id_);
}
impl.Archive();
int i = 0;
meta::TableFilesSchema files_get;
status = impl.GetTableFiles(table_file.table_id_, ids, files_get);
ASSERT_TRUE(status.ok());
for(auto& file : files_get) {
if (i < 5) {
ASSERT_TRUE(file.file_type_ == meta::TableFileSchema::TO_DELETE);
} else {
ASSERT_EQ(file.file_type_, meta::TableFileSchema::NEW);
}
++i;
}
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}
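// TABLE_FILES_TEST: create files in each state (NEW/RAW/TO_INDEX/INDEX) and check
// that FilesToIndex, FilesToMerge and FilesToSearch return the expected counts.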
TEST_F(MySQLTest, TABLE_FILES_TEST) {
DBMetaOptions options;
try {
options = getDBMetaOptions();
} catch(std::exception& ex) {
ASSERT_TRUE(false);
return;
}
int mode = Options::MODE::SINGLE;
auto impl = meta::MySQLMetaImpl(options, mode);
auto table_id = "meta_test_group";
meta::TableSchema table;
table.table_id_ = table_id;
auto status = impl.CreateTable(table);
int new_files_cnt = 4;
int raw_files_cnt = 5;
int to_index_files_cnt = 6;
int index_files_cnt = 7;
meta::TableFileSchema table_file;
table_file.table_id_ = table.table_id_;
for (auto i=0; i<new_files_cnt; ++i) {
status = impl.CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::NEW;
status = impl.UpdateTableFile(table_file);
}
for (auto i=0; i<raw_files_cnt; ++i) {
status = impl.CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::RAW;
status = impl.UpdateTableFile(table_file);
}
for (auto i=0; i<to_index_files_cnt; ++i) {
status = impl.CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::TO_INDEX;
status = impl.UpdateTableFile(table_file);
}
for (auto i=0; i<index_files_cnt; ++i) {
status = impl.CreateTableFile(table_file);
table_file.file_type_ = meta::TableFileSchema::INDEX;
status = impl.UpdateTableFile(table_file);
}
meta::TableFilesSchema files;
status = impl.FilesToIndex(files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(files.size(), to_index_files_cnt);
meta::DatePartionedTableFilesSchema dated_files;
status = impl.FilesToMerge(table.table_id_, dated_files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(dated_files[table_file.date_].size(), raw_files_cnt);
status = impl.FilesToIndex(files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(files.size(), to_index_files_cnt);
meta::DatesT dates = {table_file.date_};
status = impl.FilesToSearch(table_id, dates, dated_files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(dated_files[table_file.date_].size(),
to_index_files_cnt+raw_files_cnt+index_files_cnt);
status = impl.FilesToSearch(table_id, meta::DatesT(), dated_files);
ASSERT_TRUE(status.ok());
ASSERT_EQ(dated_files[table_file.date_].size(),
to_index_files_cnt+raw_files_cnt+index_files_cnt);
status = impl.DropAll();
ASSERT_TRUE(status.ok());
}

View File

@ -0,0 +1,124 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <thread>
#include <easylogging++.h>
#include <boost/filesystem.hpp>
#include "db/scheduler/TaskScheduler.h"
#include "db/scheduler/TaskDispatchStrategy.h"
#include "db/scheduler/TaskDispatchQueue.h"
#include "db/scheduler/task/SearchTask.h"
#include "db/scheduler/task/DeleteTask.h"
#include "db/scheduler/task/IndexLoadTask.h"
using namespace zilliz::milvus;
namespace {
engine::TableFileSchemaPtr CreateTableFileStruct(size_t id, const std::string& table_id) {
auto file = std::make_shared<engine::meta::TableFileSchema>();
file->id_ = id;
file->table_id_ = table_id;
return file;
}
}
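// TASK_QUEUE_TEST: putting a SearchContext on the dispatch queue expands it into
// one IndexLoad task per index file attached to the context.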
TEST(DBSchedulerTest, TASK_QUEUE_TEST) {
engine::TaskDispatchQueue queue;
queue.SetCapacity(1000);
queue.Put(nullptr);
ASSERT_EQ(queue.Size(), 1UL);
auto ptr = queue.Take();
ASSERT_EQ(ptr, nullptr);
ASSERT_TRUE(queue.Empty());
engine::SearchContextPtr context_ptr = std::make_shared<engine::SearchContext>(1, 1, nullptr);
for(size_t i = 0; i < 10; i++) {
auto file = CreateTableFileStruct(i, "tbl");
context_ptr->AddIndexFile(file);
}
queue.Put(context_ptr);
ASSERT_EQ(queue.Size(), 10);
auto index_files = context_ptr->GetIndexMap();
ptr = queue.Front();
ASSERT_EQ(ptr->type(), engine::ScheduleTaskType::kIndexLoad);
engine::IndexLoadTaskPtr load_task = std::static_pointer_cast<engine::IndexLoadTask>(ptr);
ASSERT_EQ(load_task->file_->id_, index_files.begin()->first);
ptr = queue.Back();
ASSERT_EQ(ptr->type(), engine::ScheduleTaskType::kIndexLoad);
}
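// SEARCH_SCHEDULER_TEST: scheduling a SearchContext only creates IndexLoad tasks
// for files that are not already queued; 20 existing tasks (files 10-29) plus a
// context covering files 0-19 ends up as 30 tasks, not 40.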
TEST(DBSchedulerTest, SEARCH_SCHEDULER_TEST) {
std::list<engine::ScheduleTaskPtr> task_list;
bool ret = engine::TaskDispatchStrategy::Schedule(nullptr, task_list);
ASSERT_FALSE(ret);
for(size_t i = 10; i < 30; i++) {
engine::IndexLoadTaskPtr task_ptr = std::make_shared<engine::IndexLoadTask>();
task_ptr->file_ = CreateTableFileStruct(i, "tbl");
task_list.push_back(task_ptr);
}
engine::SearchContextPtr context_ptr = std::make_shared<engine::SearchContext>(1, 1, nullptr);
for(size_t i = 0; i < 20; i++) {
auto file = CreateTableFileStruct(i, "tbl");
context_ptr->AddIndexFile(file);
}
ret = engine::TaskDispatchStrategy::Schedule(context_ptr, task_list);
ASSERT_TRUE(ret);
ASSERT_EQ(task_list.size(), 30);
}
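// DELETE_SCHEDULER_TEST: a DeleteContext is inserted right after the last queued
// task that touches the table being deleted (position 10 here); for a table with
// no pending tasks it goes straight to the front of the list.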
TEST(DBSchedulerTest, DELETE_SCHEDULER_TEST) {
std::list<engine::ScheduleTaskPtr> task_list;
bool ret = engine::TaskDispatchStrategy::Schedule(nullptr, task_list);
ASSERT_FALSE(ret);
const std::string table_id = "to_delete_table";
for(size_t i = 0; i < 10; i++) {
engine::IndexLoadTaskPtr task_ptr = std::make_shared<engine::IndexLoadTask>();
task_ptr->file_ = CreateTableFileStruct(i, table_id);
task_list.push_back(task_ptr);
}
for(size_t i = 0; i < 10; i++) {
engine::IndexLoadTaskPtr task_ptr = std::make_shared<engine::IndexLoadTask>();
task_ptr->file_ = CreateTableFileStruct(i, "other_table");
task_list.push_back(task_ptr);
}
engine::meta::Meta::Ptr meta_ptr;
engine::DeleteContextPtr context_ptr = std::make_shared<engine::DeleteContext>(table_id, meta_ptr);
ret = engine::TaskDispatchStrategy::Schedule(context_ptr, task_list);
ASSERT_TRUE(ret);
ASSERT_EQ(task_list.size(), 21);
auto temp_list = task_list;
for(size_t i = 0; ; i++) {
engine::ScheduleTaskPtr task_ptr = temp_list.front();
temp_list.pop_front();
if(task_ptr->type() == engine::ScheduleTaskType::kDelete) {
ASSERT_EQ(i, 10);
break;
}
}
context_ptr = std::make_shared<engine::DeleteContext>("no_task_table", meta_ptr);
ret = engine::TaskDispatchStrategy::Schedule(context_ptr, task_list);
ASSERT_TRUE(ret);
ASSERT_EQ(task_list.size(), 22);
engine::ScheduleTaskPtr task_ptr = task_list.front();
ASSERT_EQ(task_ptr->type(), engine::ScheduleTaskType::kDelete);
}

View File

@ -11,9 +11,29 @@
#include "utils.h"
#include "db/Factories.h"
#include "db/Options.h"
INITIALIZE_EASYLOGGINGPP
using namespace zilliz::milvus;
static std::string uri;
class DBTestEnvironment : public ::testing::Environment {
public:
// explicit DBTestEnvironment(std::string uri) : uri_(uri) {}
static std::string getURI() {
return uri;
}
void SetUp() override {
getURI();
}
};
void ASSERT_STATS(engine::Status& stat) {
ASSERT_TRUE(stat.ok());
if(!stat.ok()) {
@ -21,6 +41,7 @@ void ASSERT_STATS(engine::Status& stat) {
}
}
void DBTest::InitLog() {
el::Configurations defaultConf;
defaultConf.setToDefault();
@ -32,6 +53,7 @@ void DBTest::InitLog() {
engine::Options DBTest::GetOptions() {
auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = "sqlite://:@:/";
return options;
}
@ -50,6 +72,7 @@ engine::Options DBTest2::GetOptions() {
auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.archive_conf = engine::ArchiveConf("delete", "disk:1");
options.meta.backend_uri = "sqlite://:@:/";
return options;
}
@ -61,3 +84,34 @@ void MetaTest::SetUp() {
void MetaTest::TearDown() {
impl_->DropAll();
}
zilliz::milvus::engine::DBMetaOptions MySQLTest::getDBMetaOptions() {
// std::string path = "/tmp/milvus_test";
// engine::DBMetaOptions options = engine::DBMetaOptionsFactory::Build(path);
zilliz::milvus::engine::DBMetaOptions options;
options.path = "/tmp/milvus_test";
options.backend_uri = DBTestEnvironment::getURI();
if(options.backend_uri.empty()) {
throw std::exception();
}
return options;
}
zilliz::milvus::engine::Options MySQLDBTest::GetOptions() {
auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = DBTestEnvironment::getURI();
return options;
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
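// argv[1] optionally supplies the meta backend URI used by the MySQL tests,
// e.g. mysql://user:password@host:port/db_name (format assumed here, by analogy
// with the sqlite://:@:/ URI used in the sqlite-backed fixtures above).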
if (argc > 1) {
uri = argv[1];
}
// std::cout << uri << std::endl;
::testing::AddGlobalTestEnvironment(new DBTestEnvironment);
return RUN_ALL_TESTS();
}

View File

@ -8,9 +8,11 @@
#include <gtest/gtest.h>
#include <chrono>
//#include <src/db/MySQLMetaImpl.h>
#include "db/DB.h"
#include "db/DBMetaImpl.h"
#include "db/MySQLMetaImpl.h"
#define TIMING
@ -28,9 +30,28 @@
#define STOP_TIMER(name)
#endif
void ASSERT_STATS(zilliz::milvus::engine::Status& stat);
//class TestEnv : public ::testing::Environment {
//public:
//
// static std::string getURI() {
// if (const char* uri = std::getenv("MILVUS_DBMETA_URI")) {
// return uri;
// }
// else {
// return "";
// }
// }
//
// void SetUp() override {
// getURI();
// }
//
//};
//
//::testing::Environment* const test_env =
// ::testing::AddGlobalTestEnvironment(new TestEnv);
class DBTest : public ::testing::Test {
protected:
@ -55,3 +76,14 @@ protected:
virtual void SetUp() override;
virtual void TearDown() override;
};
class MySQLTest : public ::testing::Test {
protected:
// std::shared_ptr<zilliz::milvus::engine::meta::MySQLMetaImpl> impl_;
zilliz::milvus::engine::DBMetaOptions getDBMetaOptions();
};
class MySQLDBTest : public ::testing::Test {
protected:
zilliz::milvus::engine::Options GetOptions();
};

View File

@ -1,184 +1,184 @@
////
//// Created by zilliz on 19-5-13.
////
//
// Created by zilliz on 19-5-13.
//#include "utils/Log.h"
//#include "license/LicenseCheck.h"
//#include "utils/Error.h"
//
//#include <gtest/gtest.h>
//#include <iostream>
//
//
//using namespace zilliz::milvus;
//
//TEST(LicenseLibraryTest, CHECK_TEST) {
//
//
// server::ServerError err;
// // 1. Set license file name
// std::string license_file_path("/tmp/milvus/abc.license");
//
// // 2. Legality check
// err = server::LicenseCheck::LegalityCheck(license_file_path);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
//}
//
//TEST(LicenseLibraryTest, CHECK_ERROR1_TEST){
//
// server::ServerError err;
// // 1. Set license file name
// std::string license_file_path("/tmp/milvus/abc");
//
// // 2. Legality check
// err = server::LicenseCheck::LegalityCheck(license_file_path);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//}
//
//TEST(LicenseLibraryTest, CHECK_ERROR2_TEST){
//
// server::ServerError err;
// // 1. Set license file name
// std::string license_file_path("/tmp/milvus/abc.license");
//
// // 2. Define output var
// int device_count;
// std::map<int, std::string> uuid_encryption_map;
// time_t starting_time;
// time_t end_time;
//
// // 3. Read License File
// err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 4. Change device count
// ++device_count;
// err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 5. Legality check
// err = server::LicenseCheck::LegalityCheck(license_file_path);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//}
//
//TEST(LicenseLibraryTest, CHECK_ERROR3_TEST){
//
// server::ServerError err;
// // 1. Set license file name
// std::string license_file_path("/tmp/milvus/abc.license");
//
// // 2. Define output var
// int device_count;
// std::map<int, std::string> uuid_encryption_map;
// time_t starting_time;
// time_t end_time;
//
// // 3. Read License File
// err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 4. Change device count
// if(device_count) uuid_encryption_map[0]+="u";
// err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 5. Legality check
// err = server::LicenseCheck::LegalityCheck(license_file_path);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//}
//
//TEST(LicenseLibraryTest, CHECK_ERROR4_1_TEST){
//
// server::ServerError err;
// // 1. Set license file name
// std::string license_file_path("/tmp/milvus/abc.license");
//
// // 2. Define output var
// int device_count;
// std::map<int, std::string> uuid_encryption_map;
// time_t starting_time;
// time_t end_time;
//
// // 3. Read License File
// err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 4. Change starting time
// time_t system_time;
// err = server::LicenseLibrary::GetSystemTime(system_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// system_time+=60*60*24;
//
// err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
// device_count,
// uuid_encryption_map,
// system_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 5. Legality check
// err = server::LicenseCheck::LegalityCheck(license_file_path);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//}
//
//TEST(LicenseLibraryTest, CHECK_ERROR4_2_TEST){
//
// server::ServerError err;
// // 1. Set license file name
// std::string license_file_path("/tmp/milvus/abc.license");
//
// // 2. Define output var
// int device_count;
// std::map<int, std::string> uuid_encryption_map;
// time_t starting_time;
// time_t end_time;
//
// // 3. Read License File
// err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 4. Change end time
// time_t system_time;
// err = server::LicenseLibrary::GetSystemTime(system_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// system_time-=100;
//
// err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
// device_count,
// uuid_encryption_map,
// starting_time,
// system_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 5. Legality check
// err = server::LicenseCheck::LegalityCheck(license_file_path);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//}
//
#include "utils/Log.h"
#include "license/LicenseCheck.h"
#include "utils/Error.h"
#include <gtest/gtest.h>
#include <iostream>
using namespace zilliz::milvus;
TEST(LicenseLibraryTest, CHECK_TEST) {
server::ServerError err;
// 1. Set license file name
std::string license_file_path("/tmp/milvus/abc.license");
// 2. Legality check
err = server::LicenseCheck::LegalityCheck(license_file_path);
ASSERT_EQ(err, server::SERVER_SUCCESS);
}
TEST(LicenseLibraryTest, CHECK_ERROR1_TEST){
server::ServerError err;
// 1. Set license file name
std::string license_file_path("/tmp/milvus/abc");
// 2. Legality check
err = server::LicenseCheck::LegalityCheck(license_file_path);
ASSERT_EQ(err, server::SERVER_SUCCESS);
}
TEST(LicenseLibraryTest, CHECK_ERROR2_TEST){
server::ServerError err;
// 1. Set license file name
std::string license_file_path("/tmp/milvus/abc.license");
// 2. Define output var
int device_count;
std::map<int, std::string> uuid_encryption_map;
time_t starting_time;
time_t end_time;
// 3. Read License File
err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 4. Change device count
++device_count;
err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 5. Legality check
err = server::LicenseCheck::LegalityCheck(license_file_path);
ASSERT_EQ(err, server::SERVER_SUCCESS);
}
TEST(LicenseLibraryTest, CHECK_ERROR3_TEST){
server::ServerError err;
// 1. Set license file name
std::string license_file_path("/tmp/milvus/abc.license");
// 2. Define output var
int device_count;
std::map<int, std::string> uuid_encryption_map;
time_t starting_time;
time_t end_time;
// 3. Read License File
err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 4. Change device count
if(device_count) uuid_encryption_map[0]+="u";
err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 5. Legality check
err = server::LicenseCheck::LegalityCheck(license_file_path);
ASSERT_EQ(err, server::SERVER_SUCCESS);
}
TEST(LicenseLibraryTest, CHECK_ERROR4_1_TEST){
server::ServerError err;
// 1. Set license file name
std::string license_file_path("/tmp/milvus/abc.license");
// 2. Define output var
int device_count;
std::map<int, std::string> uuid_encryption_map;
time_t starting_time;
time_t end_time;
// 3. Read License File
err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 4. Change starting time
time_t system_time;
err = server::LicenseLibrary::GetSystemTime(system_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
system_time+=60*60*24;
err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
device_count,
uuid_encryption_map,
system_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 5. Legality check
err = server::LicenseCheck::LegalityCheck(license_file_path);
ASSERT_EQ(err, server::SERVER_SUCCESS);
}
TEST(LicenseLibraryTest, CHECK_ERROR4_2_TEST){
server::ServerError err;
// 1. Set license file name
std::string license_file_path("/tmp/milvus/abc.license");
// 2. Define output var
int device_count;
std::map<int, std::string> uuid_encryption_map;
time_t starting_time;
time_t end_time;
// 3. Read License File
err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 4. Change end time
time_t system_time;
err = server::LicenseLibrary::GetSystemTime(system_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
system_time-=100;
err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
device_count,
uuid_encryption_map,
starting_time,
system_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 5. Legality check
err = server::LicenseCheck::LegalityCheck(license_file_path);
ASSERT_EQ(err, server::SERVER_SUCCESS);
}

View File

@ -1,214 +1,214 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#include "utils/Log.h"
#include "license/LicenseLibrary.h"
#include "utils/Error.h"
#include <gtest/gtest.h>
#include <iostream>
using namespace zilliz::milvus;
TEST(LicenseLibraryTest, FILE_EXISTENT_TEST) {
std::string hosts_file = "/etc/hosts";
ASSERT_EQ(server::LicenseLibrary::IsFileExistent(hosts_file), true);
std::string no_exist_file = "/temp/asdaasd";
ASSERT_EQ(server::LicenseLibrary::IsFileExistent(no_exist_file), false);
std::string directory = "/tmp";
ASSERT_EQ(server::LicenseLibrary::IsFileExistent(directory), false);
}
TEST(LicenseLibraryTest, GPU_INFO_TEST) {
int device_count = 0;
server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
ASSERT_EQ(err, server::SERVER_SUCCESS);
std::cout << "Device Count: " << device_count << std::endl;
std::vector<std::string> uuid_array;
err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
ASSERT_EQ(err, server::SERVER_SUCCESS);
for (long i = 0; i < device_count; ++i) {
std::cout << "Device Id: " << i << ", UUID: " << uuid_array[i] << std::endl;
}
std::vector<std::string> uuid_md5_array;
err = server::LicenseLibrary::GetUUIDMD5(device_count, uuid_array, uuid_md5_array);
ASSERT_EQ(err, server::SERVER_SUCCESS);
for (long i = 0; i < device_count; ++i) {
std::cout << "Device Id: " << i << ", UUID: " << uuid_array[i] << ", UUID_MD5: " << uuid_md5_array[i]
<< std::endl;
}
std::vector<std::string> uuid_sha256_array;
err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
ASSERT_EQ(err, server::SERVER_SUCCESS);
for (long i = 0; i < device_count; ++i) {
std::cout << "Device Id: " << i << ", UUID: " << uuid_array[i] << ", UUID_SHA256: "
<< uuid_sha256_array[i] << std::endl;
}
time_t systemtime;
err = server::LicenseLibrary::GetSystemTime(systemtime);
ASSERT_EQ(err, server::SERVER_SUCCESS);
std::cout << "System Time: " << systemtime << std::endl;
}
TEST(LicenseLibraryTest, LICENSE_FILE_TEST) {
// 0. File check
std::string test("/tmp/a.test");
bool is = server::LicenseLibrary::IsFileExistent(test);
ASSERT_EQ(is, false);
// 1. Get Device Count
int device_count = 0;
server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 2. Get All GPU UUID
std::vector<std::string> uuid_array;
err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 3. Get UUID SHA256
std::vector<std::string> uuid_sha256_array;
err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 4. Generate GPU ID map with GPU UUID
std::map<int, std::string> uuid_encrption_map;
for (int i = 0; i < device_count; ++i) {
uuid_encrption_map[i] = uuid_sha256_array[i];
}
// 5.GPU_info File
std::string GPU_info_file_path("/tmp/milvus.info");
// 6. Generate GPU_info File
err = server::LicenseLibrary::GPUinfoFileSerialization(GPU_info_file_path,
device_count,
uuid_encrption_map);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 7. Define output var
int output_info_device_count = 0;
std::map<int, std::string> output_info_uuid_encrption_map;
// 8. Read GPU_info File
err = server::LicenseLibrary::GPUinfoFileDeserialization(GPU_info_file_path,
output_info_device_count,
output_info_uuid_encrption_map);
ASSERT_EQ(err, server::SERVER_SUCCESS);
ASSERT_EQ(device_count, output_info_device_count);
for (int i = 0; i < device_count; ++i) {
ASSERT_EQ(uuid_encrption_map[i], output_info_uuid_encrption_map[i]);
}
// 9. Set license file name
std::string license_file_path("/tmp/milvus.license");
// 10. Get System Time/starting_time and End Time
time_t system_time;
err = server::LicenseLibrary::GetSystemTime(system_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 11.GetDateTime
time_t starting_time;
time_t end_time;
const char *string_starting_time = "2019-05-10";
const char *string_end_time = "2022-05-10";
err = server::LicenseLibrary::GetDateTime(string_starting_time, starting_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
err = server::LicenseLibrary::GetDateTime(string_end_time, end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 12. Generate License File
err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
device_count,
uuid_encrption_map,
starting_time,
end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
// 13. Define output var
int output_device_count = 0;
std::map<int, std::string> output_uuid_encrption_map;
time_t output_starting_time;
time_t output_end_time;
// 14. Read License File
err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
output_device_count,
output_uuid_encrption_map,
output_starting_time,
output_end_time);
ASSERT_EQ(err, server::SERVER_SUCCESS);
ASSERT_EQ(device_count, output_device_count);
ASSERT_EQ(starting_time, output_starting_time);
ASSERT_EQ(end_time, output_end_time);
for (int i = 0; i < device_count; ++i) {
ASSERT_EQ(uuid_encrption_map[i], output_uuid_encrption_map[i]);
}
// // 15. Get License File Attribute
// time_t update_time;
// off_t file_size;
// err = server::LicenseLibrary::GetFileUpdateTimeAndSize(license_file_path, update_time, file_size);
///*******************************************************************************
// * Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// * Unauthorized copying of this file, via any medium is strictly prohibited.
// * Proprietary and confidential.
// ******************************************************************************/
//
//
//#include "utils/Log.h"
//#include "license/LicenseLibrary.h"
//#include "utils/Error.h"
//
//#include <gtest/gtest.h>
//#include <iostream>
//
//
//using namespace zilliz::milvus;
//
//TEST(LicenseLibraryTest, FILE_EXISTENT_TEST) {
//
// std::string hosts_file = "/etc/hosts";
// ASSERT_EQ(server::LicenseLibrary::IsFileExistent(hosts_file), true);
//
// std::string no_exist_file = "/temp/asdaasd";
// ASSERT_EQ(server::LicenseLibrary::IsFileExistent(no_exist_file), false);
//
// std::string directory = "/tmp";
// ASSERT_EQ(server::LicenseLibrary::IsFileExistent(directory), false);
//}
//
//TEST(LicenseLibraryTest, GPU_INFO_TEST) {
//
// int device_count = 0;
// server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// std::cout << "Device Count: " << device_count << std::endl;
//
// std::vector<std::string> uuid_array;
// err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// for (long i = 0; i < device_count; ++i) {
// std::cout << "Device Id: " << i << ", UUID: " << uuid_array[i] << std::endl;
// }
//
// std::vector<std::string> uuid_md5_array;
// err = server::LicenseLibrary::GetUUIDMD5(device_count, uuid_array, uuid_md5_array);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// for (long i = 0; i < device_count; ++i) {
// std::cout << "Device Id: " << i << ", UUID: " << uuid_array[i] << ", UUID_MD5: " << uuid_md5_array[i]
// << std::endl;
// }
//
// std::vector<std::string> uuid_sha256_array;
// err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// for (long i = 0; i < device_count; ++i) {
// std::cout << "Device Id: " << i << ", UUID: " << uuid_array[i] << ", UUID_SHA256: "
// << uuid_sha256_array[i] << std::endl;
// }
//
// time_t systemtime;
// err = server::LicenseLibrary::GetSystemTime(systemtime);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// std::cout << "System Time: " << systemtime << std::endl;
//
//}
//
//TEST(LicenseLibraryTest, LICENSE_FILE_TEST) {
//
// // 0. File check
// std::string test("/tmp/a.test");
// bool is = server::LicenseLibrary::IsFileExistent(test);
// ASSERT_EQ(is, false);
//
// // 1. Get Device Count
// int device_count = 0;
// server::ServerError err = server::LicenseLibrary::GetDeviceCount(device_count);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 16. Get License File MD5
// std::string file_md5;
// err = server::LicenseLibrary::GetFileMD5(license_file_path, file_md5);
// // 2. Get All GPU UUID
// std::vector<std::string> uuid_array;
// err = server::LicenseLibrary::GetUUID(device_count, uuid_array);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// // 17. Generate Secret File
// std::string secret_file_path("/tmp/milvus.secret");
// err = server::LicenseLibrary::SecretFileSerialization(secret_file_path,
// update_time,
// file_size,
// starting_time,
// end_time,
// file_md5);
//
// // 3. Get UUID SHA256
// std::vector<std::string> uuid_sha256_array;
// err = server::LicenseLibrary::GetUUIDSHA256(device_count, uuid_array, uuid_sha256_array);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// // 18. Define output var
// time_t output_update_time;
// off_t output_file_size;
//
// // 4. Generate GPU ID map with GPU UUID
// std::map<int, std::string> uuid_encrption_map;
// for (int i = 0; i < device_count; ++i) {
// uuid_encrption_map[i] = uuid_sha256_array[i];
// }
//
// // 5.GPU_info File
// std::string GPU_info_file_path("/tmp/milvus.info");
//
//
// // 6. Generate GPU_info File
// err = server::LicenseLibrary::GPUinfoFileSerialization(GPU_info_file_path,
// device_count,
// uuid_encrption_map);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 7. Define output var
// int output_info_device_count = 0;
// std::map<int, std::string> output_info_uuid_encrption_map;
//
// // 8. Read GPU_info File
// err = server::LicenseLibrary::GPUinfoFileDeserialization(GPU_info_file_path,
// output_info_device_count,
// output_info_uuid_encrption_map);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// ASSERT_EQ(device_count, output_info_device_count);
// for (int i = 0; i < device_count; ++i) {
// ASSERT_EQ(uuid_encrption_map[i], output_info_uuid_encrption_map[i]);
// }
//
// // 9. Set license file name
// std::string license_file_path("/tmp/milvus.license");
//
// // 10. Get System Time/starting_time ans End Time
// time_t sysyem_time;
// err = server::LicenseLibrary::GetSystemTime(sysyem_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 11.GetDateTime
// time_t starting_time;
// time_t end_time;
// const char *string_starting_time = "2019-05-10";
// const char *string_end_time = "2022-05-10";
// err = server::LicenseLibrary::GetDateTime(string_starting_time, starting_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
// err = server::LicenseLibrary::GetDateTime(string_end_time, end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 12. Generate License File
// err = server::LicenseLibrary::LicenseFileSerialization(license_file_path,
// device_count,
// uuid_encrption_map,
// starting_time,
// end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// // 13. Define output var
// int output_device_count = 0;
// std::map<int, std::string> output_uuid_encrption_map;
// time_t output_starting_time;
// time_t output_end_time;
// std::string output_file_md5;
// // 19. Read License File
// err = server::LicenseLibrary::SecretFileDeserialization(secret_file_path,
// output_update_time,
// output_file_size,
// output_starting_time,
// output_end_time,
// output_file_md5);
//
// // 14. Read License File
// err = server::LicenseLibrary::LicenseFileDeserialization(license_file_path,
// output_device_count,
// output_uuid_encrption_map,
// output_starting_time,
// output_end_time);
// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
// ASSERT_EQ(update_time, output_update_time);
// ASSERT_EQ(file_size, output_file_size);
// ASSERT_EQ(device_count, output_device_count);
// ASSERT_EQ(starting_time, output_starting_time);
// ASSERT_EQ(end_time, output_end_time);
// ASSERT_EQ(file_md5, output_file_md5);
}
//
// for (int i = 0; i < device_count; ++i) {
// ASSERT_EQ(uuid_encrption_map[i], output_uuid_encrption_map[i]);
// }
//
//// // 15. Get License File Attribute
//// time_t update_time;
//// off_t file_size;
//// err = server::LicenseLibrary::GetFileUpdateTimeAndSize(license_file_path, update_time, file_size);
//// ASSERT_EQ(err, server::SERVER_SUCCESS);
////
//// // 16. Get License File MD5
//// std::string file_md5;
//// err = server::LicenseLibrary::GetFileMD5(license_file_path, file_md5);
//// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
//
//
//// // 17. Generate Secret File
//// std::string secret_file_path("/tmp/milvus.secret");
//// err = server::LicenseLibrary::SecretFileSerialization(secret_file_path,
//// update_time,
//// file_size,
//// starting_time,
//// end_time,
//// file_md5);
//// ASSERT_EQ(err, server::SERVER_SUCCESS);
//
//// // 18. Define output var
//// time_t output_update_time;
//// off_t output_file_size;
//// time_t output_starting_time;
//// time_t output_end_time;
//// std::string output_file_md5;
//
//// // 19. Read License File
//// err = server::LicenseLibrary::SecretFileDeserialization(secret_file_path,
//// output_update_time,
//// output_file_size,
//// output_starting_time,
//// output_end_time,
//// output_file_md5);
//// ASSERT_EQ(err, server::SERVER_SUCCESS);
////
//// ASSERT_EQ(update_time, output_update_time);
//// ASSERT_EQ(file_size, output_file_size);
//// ASSERT_EQ(starting_time, output_starting_time);
//// ASSERT_EQ(end_time, output_end_time);
//// ASSERT_EQ(file_md5, output_file_md5);
//
//
//}

View File

@ -17,6 +17,7 @@ aux_source_directory(../../src/config config_files)
aux_source_directory(../../src/cache cache_srcs)
aux_source_directory(../../src/wrapper wrapper_src)
aux_source_directory(../../src/metrics metrics_src)
aux_source_directory(./ test_srcs)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/scheduler scheduler_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/scheduler/context scheduler_context_files)
@ -35,6 +36,7 @@ link_directories("/usr/local/cuda/lib64")
#include_directories(../db/utils.h)
include_directories(../../src/metrics)
include_directories(/usr/include/mysql)
#set(metrics_src_files
# ../../src/metrics/Metrics.cpp
@ -47,15 +49,13 @@ include_directories(../../src/metrics)
# )
set(count_test_src
${unittest_srcs}
${config_files}
${cache_srcs}
${db_srcs}
${db_scheduler_srcs}
${wrapper_src}
${metrics_src}
metrics_test.cpp
../db/utils.cpp
${test_srcs}
)
@ -74,6 +74,7 @@ target_link_libraries(metrics_test
gtest
pthread
z
mysqlpp
${unittest_libs}
)

View File

@ -0,0 +1,56 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#include "metrics/Metrics.h"
#include <gtest/gtest.h>
#include <iostream>
using namespace zilliz::milvus;
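// Smoke test: call every counter/gauge/histogram hook on the MetricsBase interface
// once so that the default (base) implementation is exercised.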
TEST(MetricbaseTest, METRICBASE_TEST){
server::MetricsBase& instance = server::MetricsBase::GetInstance();
instance.Init();
server::SystemInfo::GetInstance().Init();
instance.AddVectorsSuccessTotalIncrement();
instance.AddVectorsFailTotalIncrement();
instance.AddVectorsDurationHistogramOberve(1.0);
instance.RawFileSizeHistogramObserve(1.0);
instance.IndexFileSizeHistogramObserve(1.0);
instance.BuildIndexDurationSecondsHistogramObserve(1.0);
instance.CacheUsageGaugeSet(1.0);
instance.MetaAccessTotalIncrement();
instance.MetaAccessDurationSecondsHistogramObserve(1.0);
instance.FaissDiskLoadDurationSecondsHistogramObserve(1.0);
instance.FaissDiskLoadSizeBytesHistogramObserve(1.0);
instance.FaissDiskLoadIOSpeedGaugeSet(1.0);
instance.CacheAccessTotalIncrement();
instance.MemTableMergeDurationSecondsHistogramObserve(1.0);
instance.SearchIndexDataDurationSecondsHistogramObserve(1.0);
instance.SearchRawDataDurationSecondsHistogramObserve(1.0);
instance.IndexFileSizeTotalIncrement();
instance.RawFileSizeTotalIncrement();
instance.IndexFileSizeGaugeSet(1.0);
instance.RawFileSizeGaugeSet(1.0);
instance.QueryResponseSummaryObserve(1.0);
instance.DiskStoreIOSpeedGaugeSet(1.0);
instance.DataFileSizeGaugeSet(1.0);
instance.AddVectorsSuccessGaugeSet(1.0);
instance.AddVectorsFailGaugeSet(1.0);
instance.QueryVectorResponseSummaryObserve(1.0, 1);
instance.QueryVectorResponsePerSecondGaugeSet(1.0);
instance.CPUUsagePercentSet();
instance.RAMUsagePercentSet();
instance.QueryResponsePerSecondGaugeSet(1.0);
instance.GPUPercentGaugeSet();
instance.GPUMemoryUsageGaugeSet();
instance.AddVectorsPerSecondGaugeSet(1,1,1);
instance.QueryIndexTypePerSecondSet("IVF", 1.0);
instance.ConnectionGaugeIncrement();
instance.ConnectionGaugeDecrement();
instance.KeepingAliveCounterIncrement();
instance.OctetsSet();
}

View File

@ -15,7 +15,7 @@
#include <cache/CpuCacheMgr.h>
#include "metrics/Metrics.h"
#include "../db/utils.h"
#include "utils.h"
#include "db/DB.h"
#include "db/DBMetaImpl.h"
#include "db/Factories.h"
@ -24,8 +24,7 @@
using namespace zilliz::milvus;
TEST_F(DBTest, Metric_Tes) {
TEST_F(MetricTest, Metric_Tes) {
server::SystemInfo::GetInstance().Init();
// server::Metrics::GetInstance().Init();
@ -33,7 +32,7 @@ TEST_F(DBTest, Metric_Tes) {
server::Metrics::GetInstance().Init();
// server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr());
zilliz::milvus::cache::CpuCacheMgr::GetInstance()->SetCapacity(2UL*1024*1024*1024);
zilliz::milvus::cache::CpuCacheMgr::GetInstance()->SetCapacity(1UL*1024*1024*1024);
std::cout<<zilliz::milvus::cache::CpuCacheMgr::GetInstance()->CacheCapacity()<<std::endl;
static const std::string group_name = "test_group";
@ -102,7 +101,7 @@ TEST_F(DBTest, Metric_Tes) {
}
});
int loop = 10;
int loop = 10000;
for (auto i=0; i<loop; ++i) {
if (i==40) {

View File

@ -0,0 +1,57 @@
/*******************************************************************************
* Copyright (Zilliz) - All Rights Reserved
* Unauthorized copying of this file, via any medium is strictly prohibited.
* Proprietary and confidential.
******************************************************************************/
#include "metrics/PrometheusMetrics.h"
#include <gtest/gtest.h>
#include <iostream>
using namespace zilliz::milvus;
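// Same smoke test as MetricbaseTest, but with SetStartup(true), so every hook runs
// against the Prometheus-backed implementation.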
TEST(PrometheusTest, PROMETHEUS_TEST){
server::PrometheusMetrics& instance = server::PrometheusMetrics::GetInstance();
instance.Init();
instance.SetStartup(true);
server::SystemInfo::GetInstance().Init();
instance.AddVectorsSuccessTotalIncrement();
instance.AddVectorsFailTotalIncrement();
instance.AddVectorsDurationHistogramOberve(1.0);
instance.RawFileSizeHistogramObserve(1.0);
instance.IndexFileSizeHistogramObserve(1.0);
instance.BuildIndexDurationSecondsHistogramObserve(1.0);
instance.CacheUsageGaugeSet(1.0);
instance.MetaAccessTotalIncrement();
instance.MetaAccessDurationSecondsHistogramObserve(1.0);
instance.FaissDiskLoadDurationSecondsHistogramObserve(1.0);
instance.FaissDiskLoadSizeBytesHistogramObserve(1.0);
instance.FaissDiskLoadIOSpeedGaugeSet(1.0);
instance.CacheAccessTotalIncrement();
instance.MemTableMergeDurationSecondsHistogramObserve(1.0);
instance.SearchIndexDataDurationSecondsHistogramObserve(1.0);
instance.SearchRawDataDurationSecondsHistogramObserve(1.0);
instance.IndexFileSizeTotalIncrement();
instance.RawFileSizeTotalIncrement();
instance.IndexFileSizeGaugeSet(1.0);
instance.RawFileSizeGaugeSet(1.0);
instance.QueryResponseSummaryObserve(1.0);
instance.DiskStoreIOSpeedGaugeSet(1.0);
instance.DataFileSizeGaugeSet(1.0);
instance.AddVectorsSuccessGaugeSet(1.0);
instance.AddVectorsFailGaugeSet(1.0);
instance.QueryVectorResponseSummaryObserve(1.0, 1);
instance.QueryVectorResponsePerSecondGaugeSet(1.0);
instance.CPUUsagePercentSet();
instance.RAMUsagePercentSet();
instance.QueryResponsePerSecondGaugeSet(1.0);
instance.GPUPercentGaugeSet();
instance.GPUMemoryUsageGaugeSet();
instance.AddVectorsPerSecondGaugeSet(1,1,1);
instance.QueryIndexTypePerSecondSet("IVF", 1.0);
instance.ConnectionGaugeIncrement();
instance.ConnectionGaugeDecrement();
instance.KeepingAliveCounterIncrement();
instance.OctetsSet();
}

View File

@ -0,0 +1,79 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <easylogging++.h>
#include <thread>
#include <boost/filesystem.hpp>
#include "utils.h"
#include "db/Factories.h"
#include "db/Options.h"
INITIALIZE_EASYLOGGINGPP
using namespace zilliz::milvus;
static std::string uri;
class DBTestEnvironment : public ::testing::Environment {
public:
// explicit DBTestEnvironment(std::string uri) : uri_(uri) {}
static std::string getURI() {
return uri;
}
void SetUp() override {
getURI();
}
};
void ASSERT_STATS(engine::Status& stat) {
ASSERT_TRUE(stat.ok());
if(!stat.ok()) {
std::cout << stat.ToString() << std::endl;
}
}
void MetricTest::InitLog() {
el::Configurations defaultConf;
defaultConf.setToDefault();
defaultConf.set(el::Level::Debug,
el::ConfigurationType::Format, "[%thread-%datetime-%level]: %msg (%fbase:%line)");
el::Loggers::reconfigureLogger("default", defaultConf);
}
engine::Options MetricTest::GetOptions() {
auto options = engine::OptionsFactory::Build();
options.meta.path = "/tmp/milvus_test";
options.meta.backend_uri = "sqlite://:@:/";
return options;
}
void MetricTest::SetUp() {
InitLog();
auto options = GetOptions();
db_ = engine::DBFactory::Build(options);
}
void MetricTest::TearDown() {
delete db_;
boost::filesystem::remove_all("/tmp/milvus_test");
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
if (argc > 1) {
uri = argv[1];
}
// std::cout << uri << std::endl;
::testing::AddGlobalTestEnvironment(new DBTestEnvironment);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,64 @@
////////////////////////////////////////////////////////////////////////////////
// Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
// Unauthorized copying of this file, via any medium is strictly prohibited.
// Proprietary and confidential.
////////////////////////////////////////////////////////////////////////////////
#pragma once
#include <gtest/gtest.h>
#include <chrono>
//#include <src/db/MySQLMetaImpl.h>
#include "db/DB.h"
#include "db/DBMetaImpl.h"
#include "db/MySQLMetaImpl.h"
#define TIMING
#ifdef TIMING
#define INIT_TIMER auto start = std::chrono::high_resolution_clock::now();
#define START_TIMER start = std::chrono::high_resolution_clock::now();
#define STOP_TIMER(name) LOG(DEBUG) << "RUNTIME of " << name << ": " << \
std::chrono::duration_cast<std::chrono::milliseconds>( \
std::chrono::high_resolution_clock::now()-start \
).count() << " ms ";
#else
#define INIT_TIMER
#define START_TIMER
#define STOP_TIMER(name)
#endif
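// Typical usage: INIT_TIMER; ... START_TIMER; <code under test>; STOP_TIMER("label");
// (see the query loop in the DB tests for a concrete example).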
void ASSERT_STATS(zilliz::milvus::engine::Status& stat);
//class TestEnv : public ::testing::Environment {
//public:
//
// static std::string getURI() {
// if (const char* uri = std::getenv("MILVUS_DBMETA_URI")) {
// return uri;
// }
// else {
// return "";
// }
// }
//
// void SetUp() override {
// getURI();
// }
//
//};
//
//::testing::Environment* const test_env =
// ::testing::AddGlobalTestEnvironment(new TestEnv);
class MetricTest : public ::testing::Test {
protected:
zilliz::milvus::engine::DB* db_;
void InitLog();
virtual void SetUp() override;
virtual void TearDown() override;
virtual zilliz::milvus::engine::Options GetOptions();
};

View File

@ -14,9 +14,11 @@ aux_source_directory(../../src/cache cache_srcs)
aux_source_directory(../../src/wrapper wrapper_src)
aux_source_directory(./ test_srcs)
set(server_src_files
set(utils_srcs
${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp
${MILVUS_ENGINE_SRC}/utils/AttributeSerializer.cpp
${MILVUS_ENGINE_SRC}/utils/TimeRecorder.cpp
${MILVUS_ENGINE_SRC}/utils/CommonUtil.cpp
${MILVUS_ENGINE_SRC}/utils/LogUtil.cpp
)
cuda_add_executable(server_test
@ -25,7 +27,7 @@ cuda_add_executable(server_test
${cache_srcs}
${wrapper_src}
${test_srcs}
${server_src_files}
${utils_srcs}
${require_files}
)
@ -53,3 +55,11 @@ target_link_libraries(server_test
)
install(TARGETS server_test DESTINATION bin)
configure_file(appendix/server_config.yaml
"${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/server_config.yaml"
COPYONLY)
configure_file(appendix/log_config.conf
"${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/log_config.conf"
COPYONLY)

View File

@ -20,8 +20,8 @@
TO_STANDARD_OUTPUT = false
## Error logs
* ERROR:
ENABLED = false
ENABLED = true
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-error.log"
* FATAL:
ENABLED = false
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-fatal.log"
ENABLED = true
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-fatal.log"

Some files were not shown because too many files have changed in this diff.