From ce23e6c77c2276351224441d05eedc8a8fe8df00 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Tue, 17 Sep 2019 12:46:29 +0800 Subject: [PATCH 001/196] init commit --- .gitignore | 3 + __init__.py | 1 + connections.py | 105 +++++++++++++++++++++++++++++ exception_codes.py | 3 + exceptions.py | 11 ++++ settings.py | 31 +++++++++ utils/__init__.py | 0 utils/logger_helper.py | 145 +++++++++++++++++++++++++++++++++++++++++ 8 files changed, 299 insertions(+) create mode 100644 .gitignore create mode 100644 __init__.py create mode 100644 connections.py create mode 100644 exception_codes.py create mode 100644 exceptions.py create mode 100644 settings.py create mode 100644 utils/__init__.py create mode 100644 utils/logger_helper.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..624eb4fa58 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.env + +__pycache__/ diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000000..7db5c41bd0 --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ +import settings diff --git a/connections.py b/connections.py new file mode 100644 index 0000000000..727864ef98 --- /dev/null +++ b/connections.py @@ -0,0 +1,105 @@ +import logging +from milvus import Milvus +from functools import wraps +from contextlib import contextmanager + +import exceptions + +logger = logging.getLogger(__name__) + +class Connection: + def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): + self.name = name + self.uri = uri + self.max_retry = max_retry + self.retried = 0 + self.conn = Milvus() + self.error_handlers = [] if not error_handlers else error_handlers + self.on_retry_func = kwargs.get('on_retry_func', None) + + def __str__(self): + return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) + + def _connect(self): + try: + self.conn.connect(uri=self.uri) + except Exception as e: + if not self.error_handlers: + raise exceptions.ConnectionConnectError(message='') + for handler in self.error_handlers: + handler(e) + + @property + def can_retry(self): + return self.retried <= self.max_retry + + @property + def connected(self): + return self.conn.connected() + + def on_retry(self): + if self.on_retry_func: + self.on_retry_func(self) + else: + logger.warn('{} is retrying {}'.format(self, self.retried)) + + def on_connect(self): + while not self.connected and self.can_retry: + self.retried += 1 + self.on_retry() + self._connect() + + if not self.can_retry and not self.connected: + raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry)) + + self.retried = 0 + + def connect(self, func, exception_handler=None): + @wraps(func) + def inner(*args, **kwargs): + self.on_connect() + try: + return func(*args, **kwargs) + except Exception as e: + if exception_handler: + exception_handler(e) + else: + raise e + return inner + +if __name__ == '__main__': + class Conn: + def __init__(self, state): + self.state = state + + def connect(self, uri): + return self.state + + def connected(self): + return self.state + + fail_conn = Conn(False) + success_conn = Conn(True) + + class Retry: + def __init__(self): + self.times = 0 + + def __call__(self, conn): + self.times += 1 + print('Retrying {}'.format(self.times)) + + + retry_obj = Retry() + c = Connection('client', uri='localhost', on_retry_func=retry_obj) + c.conn = fail_conn + + def f(): + print('ffffffff') + + # m = c.connect(func=f) + # m() + + c.conn = success_conn + m = c.connect(func=f) + m() diff --git a/exception_codes.py b/exception_codes.py new file 
mode 100644 index 0000000000..5369389e84 --- /dev/null +++ b/exception_codes.py @@ -0,0 +1,3 @@ +INVALID_CODE = -1 + +CONNECT_ERROR_CODE = 10001 diff --git a/exceptions.py b/exceptions.py new file mode 100644 index 0000000000..7178c4ebdc --- /dev/null +++ b/exceptions.py @@ -0,0 +1,11 @@ +import exception_codes as codes + +class BaseException(Exception): + code = codes.INVALID_CODE + message = 'BaseException' + def __init__(self, message='', code=None): + self.message = self.__class__.__name__ if not message else message + self.code = self.code if code is None else code + +class ConnectionConnectError(BaseException): + code = codes.CONNECT_ERROR_CODE diff --git a/settings.py b/settings.py new file mode 100644 index 0000000000..e1a45262c8 --- /dev/null +++ b/settings.py @@ -0,0 +1,31 @@ +import sys +import os + +from environs import Env + +env = Env() +env.read_env() + +DEBUG = env.bool('DEBUG', False) +TESTING = env.bool('TESTING', False) + +METADATA_URI = env.str('METADATA_URI', '') + +LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') +LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') +LOG_NAME = env.str('LOG_NAME', 'logfile') +TIMEZONE = env.str('TIMEZONE', 'UTC') + +from utils.logger_helper import config +config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) + +TIMEOUT = env.int('TIMEOUT', 60) + + +if __name__ == '__main__': + import logging + logger = logging.getLogger(__name__) + logger.debug('DEBUG') + logger.info('INFO') + logger.warn('WARN') + logger.error('ERROR') diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/utils/logger_helper.py b/utils/logger_helper.py new file mode 100644 index 0000000000..1b59aa40ec --- /dev/null +++ b/utils/logger_helper.py @@ -0,0 +1,145 @@ +import os +import datetime +from pytz import timezone +from logging import Filter +import logging.config + + +class InfoFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.INFO + +class DebugFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.DEBUG + +class WarnFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.WARN + +class ErrorFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.ERROR + +class CriticalFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.CRITICAL + + +COLORS = { + 'HEADER': '\033[95m', + 'INFO': '\033[92m', + 'DEBUG': '\033[94m', + 'WARNING': '\033[93m', + 'ERROR': '\033[95m', + 'CRITICAL': '\033[91m', + 'ENDC': '\033[0m', +} + +class ColorFulFormatColMixin: + def format_col(self, message_str, level_name): + if level_name in COLORS.keys(): + message_str = COLORS.get(level_name) + message_str + COLORS.get( + 'ENDC') + return message_str + +class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): + def format(self, record): + message_str = super(ColorfulFormatter, self).format(record) + + return self.format_col(message_str, level_name=record.levelname) + +def config(log_level, log_path, name, tz='UTC'): + def build_log_file(level, log_path, name, tz): + utc_now = datetime.datetime.utcnow() + utc_tz = timezone('UTC') + local_tz = timezone(tz) + tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) + return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), + level) + + if not os.path.exists(log_path): + os.makedirs(log_path) + + LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'default': { + 'format': 
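# renders each record as: [time-LEVEL-logger]: message (file:line)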
'[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' + }, + 'colorful_console': { + 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)', + '()': ColorfulFormatter, + }, + }, + 'filters': { + 'InfoFilter': { + '()': InfoFilter, + }, + 'DebugFilter': { + '()': DebugFilter, + }, + 'WarnFilter': { + '()': WarnFilter, + }, + 'ErrorFilter': { + '()': ErrorFilter, + }, + 'CriticalFilter': { + '()': CriticalFilter, + }, + }, + 'handlers': { + 'milvus_celery_console': { + 'class': 'logging.StreamHandler', + 'formatter': 'colorful_console', + }, + 'milvus_debug_file': { + 'level': 'DEBUG', + 'filters': ['DebugFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('debug', log_path, name, tz) + }, + 'milvus_info_file': { + 'level': 'INFO', + 'filters': ['InfoFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('info', log_path, name, tz) + }, + 'milvus_warn_file': { + 'level': 'WARN', + 'filters': ['WarnFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('warn', log_path, name, tz) + }, + 'milvus_error_file': { + 'level': 'ERROR', + 'filters': ['ErrorFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('error', log_path, name, tz) + }, + 'milvus_critical_file': { + 'level': 'CRITICAL', + 'filters': ['CriticalFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('critical', log_path, name, tz) + }, + }, + 'loggers': { + '': { + 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', \ + 'milvus_error_file', 'milvus_critical_file'], + 'level': log_level, + 'propagate': False + }, + }, + 'propagate': False, + } + + logging.config.dictConfig(LOGGING) From 17bb7841843403516acf803157a6e5820511db19 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Tue, 17 Sep 2019 12:52:32 +0800 Subject: [PATCH 002/196] (exception): change exception definition --- connections.py | 6 +++--- exceptions.py | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/connections.py b/connections.py index 727864ef98..ea446d5ad3 100644 --- a/connections.py +++ b/connections.py @@ -25,7 +25,7 @@ class Connection: self.conn.connect(uri=self.uri) except Exception as e: if not self.error_handlers: - raise exceptions.ConnectionConnectError(message='') + raise exceptions.ConnectionConnectError() for handler in self.error_handlers: handler(e) @@ -97,8 +97,8 @@ if __name__ == '__main__': def f(): print('ffffffff') - # m = c.connect(func=f) - # m() + m = c.connect(func=f) + m() c.conn = success_conn m = c.connect(func=f) diff --git a/exceptions.py b/exceptions.py index 7178c4ebdc..50db4474c4 100644 --- a/exceptions.py +++ b/exceptions.py @@ -3,9 +3,8 @@ import exception_codes as codes class BaseException(Exception): code = codes.INVALID_CODE message = 'BaseException' - def __init__(self, message='', code=None): + def __init__(self, message=''): self.message = self.__class__.__name__ if not message else message - self.code = self.code if code is None else code class ConnectionConnectError(BaseException): code = codes.CONNECT_ERROR_CODE From 052d79a58da5fc91b1d36089947634c7d7528e2c Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Tue, 17 Sep 2019 14:28:34 +0800 Subject: [PATCH 003/196] (feat): update connections --- 
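Note: the ConnectionMgr added below keeps one lazily created Milvus connection per
registered name and per calling thread, so gRPC worker threads never share a client
handle. As committed here, conn() writes `c[name] = threaded` while `c` is still
None, so the first lookup raises TypeError; the next patch corrects it to
`self.conns[name] = threaded`. A minimal usage sketch, assuming that fix (the
'pod1' name and the tcp:// URI are illustrative, not part of the patch):

    from mishards.connections import ConnectionMgr

    mgr = ConnectionMgr()                          # @singleton: same instance everywhere
    mgr.register('pod1', 'tcp://127.0.0.1:19530')  # record the pod's URI
    conn = mgr.conn('pod1')                        # per-thread Connection, built on demand
    conn.on_connect()                              # retries up to settings.MAX_RETRY times
    mgr.unregister('pod1')                         # drops the meta and cached connections

Registering the same name again with a different URI invalidates the cached
connections for that name (see on_diff_meta below).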
connections.py | 105 ++++++++++++++++++++++++++++++++++++++++++--- exception_codes.py | 1 + exceptions.py | 3 ++ service_handler.py | 11 +++++ settings.py | 1 + utils/__init__.py | 10 +++++ 6 files changed, 126 insertions(+), 5 deletions(-) create mode 100644 service_handler.py diff --git a/connections.py b/connections.py index ea446d5ad3..c52a1c5f85 100644 --- a/connections.py +++ b/connections.py @@ -1,9 +1,12 @@ import logging -from milvus import Milvus +import threading from functools import wraps from contextlib import contextmanager +from milvus import Milvus +import settings import exceptions +from utils import singleton logger = logging.getLogger(__name__) @@ -16,6 +19,7 @@ class Connection: self.conn = Milvus() self.error_handlers = [] if not error_handlers else error_handlers self.on_retry_func = kwargs.get('on_retry_func', None) + self._connect() def __str__(self): return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) @@ -67,6 +71,79 @@ class Connection: raise e return inner +@singleton +class ConnectionMgr: + def __init__(self): + self.metas = {} + self.conns = {} + + def conn(self, name, throw=False): + c = self.conns.get(name, None) + if not c: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name)) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + threaded = { + threading.get_ident() : this_conn + } + c[name] = threaded + return this_conn + + tid = threading.get_ident() + rconn = c.get(tid, None) + if not rconn: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name)) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + c[tid] = this_conn + return this_conn + + return rconn + + def on_new_meta(self, name, url): + self.metas[name] = url + + def on_duplicate_meta(self, name, url): + if self.metas[name] == url: + return self.on_same_meta(name, url) + + return self.on_diff_meta(name, url) + + def on_same_meta(self, name, url): + logger.warn('Register same meta: {}:{}'.format(name, url)) + + def on_diff_meta(self, name, url): + logger.warn('Received {} with diff url={}'.format(name, url)) + self.metas[name] = url + self.conns[name] = {} + + def on_unregister_meta(self, name, url): + logger.info('Unregister name={};url={}'.format(name, url)) + self.conns.pop(name, None) + + def on_nonexisted_meta(self, name): + logger.warn('Non-existed meta: {}'.format(name)) + + def register(self, name, url): + meta = self.metas.get(name) + if not meta: + return self.on_new_meta(name, url) + else: + return self.on_duplicate_meta(name, url) + + def unregister(self, name): + url = self.metas.pop(name, None) + if url is None: + return self.on_nonexisted_meta(name) + return self.on_unregister_meta(name, url) + + if __name__ == '__main__': class Conn: def __init__(self, state): @@ -91,15 +168,33 @@ if __name__ == '__main__': retry_obj = Retry() - c = Connection('client', uri='localhost', on_retry_func=retry_obj) - c.conn = fail_conn + c = Connection('client', uri='', on_retry_func=retry_obj) def f(): print('ffffffff') - m = c.connect(func=f) - m() + # c.conn = fail_conn + # m = c.connect(func=f) + # m() c.conn = success_conn m = c.connect(func=f) m() + + mgr = ConnectionMgr() + mgr.register('pod1', '111') + mgr.register('pod2', '222') + mgr.register('pod2', '222') + mgr.register('pod2', 'tcp://127.0.0.1:19530') + + pod3 = 
mgr.conn('pod3') + print(pod3) + + pod2 = mgr.conn('pod2') + print(pod2) + print(pod2.connected) + + mgr.unregister('pod1') + + logger.info(mgr.metas) + logger.info(mgr.conns) diff --git a/exception_codes.py b/exception_codes.py index 5369389e84..c8cfd81dab 100644 --- a/exception_codes.py +++ b/exception_codes.py @@ -1,3 +1,4 @@ INVALID_CODE = -1 CONNECT_ERROR_CODE = 10001 +CONNECTTION_NOT_FOUND_CODE = 10002 diff --git a/exceptions.py b/exceptions.py index 50db4474c4..a25fb2c4ae 100644 --- a/exceptions.py +++ b/exceptions.py @@ -8,3 +8,6 @@ class BaseException(Exception): class ConnectionConnectError(BaseException): code = codes.CONNECT_ERROR_CODE + +class ConnectionNotFoundError(BaseException): + code = codes.CONNECTTION_NOT_FOUND_CODE diff --git a/service_handler.py b/service_handler.py new file mode 100644 index 0000000000..d5018a54d8 --- /dev/null +++ b/service_handler.py @@ -0,0 +1,11 @@ +import logging + +import grpco +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 + +logger = logging.getLogger(__name__) + + +class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): + def __init__(self, connections, *args, **kwargs): + self.connections = self.connections diff --git a/settings.py b/settings.py index e1a45262c8..4ad00e66cb 100644 --- a/settings.py +++ b/settings.py @@ -20,6 +20,7 @@ from utils.logger_helper import config config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) TIMEOUT = env.int('TIMEOUT', 60) +MAX_RETRY = env.int('MAX_RETRY', 3) if __name__ == '__main__': diff --git a/utils/__init__.py b/utils/__init__.py index e69de29bb2..ec7f32bcbc 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -0,0 +1,10 @@ +from functools import wraps + +def singleton(cls): + instances = {} + @wraps(cls) + def getinstance(*args, **kw): + if cls not in instances: + instances[cls] = cls(*args, **kw) + return instances[cls] + return getinstance From 4fc6f0a520159ed09d3e4513a547c0ab6fddde3d Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Tue, 17 Sep 2019 20:48:08 +0800 Subject: [PATCH 004/196] add grpc server --- __init__.py | 1 - mishards/__init__.py | 6 + connections.py => mishards/connections.py | 2 +- .../exception_codes.py | 0 exceptions.py => mishards/exceptions.py | 0 mishards/grpc_utils/__init__.py | 0 mishards/grpc_utils/grpc_args_parser.py | 101 ++++++ mishards/grpc_utils/grpc_args_wrapper.py | 4 + mishards/main.py | 14 + mishards/server.py | 47 +++ mishards/service_handler.py | 327 ++++++++++++++++++ settings.py => mishards/settings.py | 2 + {utils => mishards/utils}/__init__.py | 0 {utils => mishards/utils}/logger_helper.py | 0 service_handler.py | 11 - 15 files changed, 502 insertions(+), 13 deletions(-) delete mode 100644 __init__.py create mode 100644 mishards/__init__.py rename connections.py => mishards/connections.py (99%) rename exception_codes.py => mishards/exception_codes.py (100%) rename exceptions.py => mishards/exceptions.py (100%) create mode 100644 mishards/grpc_utils/__init__.py create mode 100644 mishards/grpc_utils/grpc_args_parser.py create mode 100644 mishards/grpc_utils/grpc_args_wrapper.py create mode 100644 mishards/main.py create mode 100644 mishards/server.py create mode 100644 mishards/service_handler.py rename settings.py => mishards/settings.py (90%) rename {utils => mishards/utils}/__init__.py (100%) rename {utils => mishards/utils}/logger_helper.py (100%) delete mode 100644 service_handler.py diff --git a/__init__.py b/__init__.py deleted file mode 100644 index 7db5c41bd0..0000000000 --- a/__init__.py +++ /dev/null @@ -1 +0,0 @@ -import 
settings diff --git a/mishards/__init__.py b/mishards/__init__.py new file mode 100644 index 0000000000..700dd4238c --- /dev/null +++ b/mishards/__init__.py @@ -0,0 +1,6 @@ +import settings +from connections import ConnectionMgr +connect_mgr = ConnectionMgr() + +from server import Server +grpc_server = Server(conn_mgr=connect_mgr) diff --git a/connections.py b/mishards/connections.py similarity index 99% rename from connections.py rename to mishards/connections.py index c52a1c5f85..06d5f3ff16 100644 --- a/connections.py +++ b/mishards/connections.py @@ -89,7 +89,7 @@ class ConnectionMgr: threaded = { threading.get_ident() : this_conn } - c[name] = threaded + self.conns[name] = threaded return this_conn tid = threading.get_ident() diff --git a/exception_codes.py b/mishards/exception_codes.py similarity index 100% rename from exception_codes.py rename to mishards/exception_codes.py diff --git a/exceptions.py b/mishards/exceptions.py similarity index 100% rename from exceptions.py rename to mishards/exceptions.py diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/mishards/grpc_utils/grpc_args_parser.py b/mishards/grpc_utils/grpc_args_parser.py new file mode 100644 index 0000000000..c8dc9d71d9 --- /dev/null +++ b/mishards/grpc_utils/grpc_args_parser.py @@ -0,0 +1,101 @@ +from milvus import Status +from functools import wraps + + +def error_status(func): + @wraps(func) + def inner(*args, **kwargs): + try: + results = func(*args, **kwargs) + except Exception as e: + return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None + + return Status(code=0, message="Success"), results + + return inner + + +class GrpcArgsParser(object): + + @classmethod + @error_status + def parse_proto_TableSchema(cls, param): + _table_schema = { + 'table_name': param.table_name.table_name, + 'dimension': param.dimension, + 'index_file_size': param.index_file_size, + 'metric_type': param.metric_type + } + + return _table_schema + + @classmethod + @error_status + def parse_proto_TableName(cls, param): + return param.table_name + + @classmethod + @error_status + def parse_proto_Index(cls, param): + _index = { + 'index_type': param.index_type, + 'nlist': param.nlist + } + + return _index + + @classmethod + @error_status + def parse_proto_IndexParam(cls, param): + _table_name = param.table_name.table_name + _status, _index = cls.parse_proto_Index(param.index) + + if not _status.OK(): + raise Exception("Argument parse error") + + return _table_name, _index + + @classmethod + @error_status + def parse_proto_Command(cls, param): + _cmd = param.cmd + + return _cmd + + @classmethod + @error_status + def parse_proto_Range(cls, param): + _start_value = param.start_value + _end_value = param.end_value + + return _start_value, _end_value + + @classmethod + @error_status + def parse_proto_RowRecord(cls, param): + return list(param.vector_data) + + @classmethod + @error_status + def parse_proto_SearchParam(cls, param): + _table_name = param.table_name + _topk = param.topk + _nprobe = param.nprobe + _status, _range = cls.parse_proto_Range(param.query_range_array) + + if not _status.OK(): + raise Exception("Argument parse error") + + _row_record = param.query_record_array + + return _table_name, _row_record, _range, _topk + + @classmethod + @error_status + def parse_proto_DeleteByRangeParam(cls, param): + _table_name = param.table_name + _range = param.range + _start_value = _range.start_value + _end_value = _range.end_value + + return 
_table_name, _start_value, _end_value diff --git a/mishards/grpc_utils/grpc_args_wrapper.py b/mishards/grpc_utils/grpc_args_wrapper.py new file mode 100644 index 0000000000..a864b1e400 --- /dev/null +++ b/mishards/grpc_utils/grpc_args_wrapper.py @@ -0,0 +1,4 @@ +# class GrpcArgsWrapper(object): + + # @classmethod + # def proto_TableName(cls): \ No newline at end of file diff --git a/mishards/main.py b/mishards/main.py new file mode 100644 index 0000000000..0185e6ac1d --- /dev/null +++ b/mishards/main.py @@ -0,0 +1,14 @@ +import sys +import os +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import settings +from mishards import connect_mgr, grpc_server as server + +def main(): + connect_mgr.register('WOSERVER', settings.WOSERVER) + server.run(port=settings.SERVER_PORT) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/mishards/server.py b/mishards/server.py new file mode 100644 index 0000000000..59ea7db46b --- /dev/null +++ b/mishards/server.py @@ -0,0 +1,47 @@ +import logging +import grpc +import time +from concurrent import futures +from grpc._cython import cygrpc +from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server +from service_handler import ServiceHandler +import settings + +logger = logging.getLogger(__name__) + + +class Server: + def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): + self.exit_flag = False + self.port = int(port) + self.conn_mgr = conn_mgr + self.server_impl = grpc.server( + thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), + options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), + (cygrpc.ChannelArgKey.max_receive_message_length, -1)] + ) + + def start(self, port=None): + add_MilvusServiceServicer_to_server(ServiceHandler(conn_mgr=self.conn_mgr), self.server_impl) + self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) + self.server_impl.start() + + def run(self, port): + logger.info('Milvus server start ......') + port = port or self.port + + self.start(port) + logger.info('Successfully') + logger.info('Listening on port {}'.format(port)) + + try: + while not self.exit_flag: + time.sleep(5) + except KeyboardInterrupt: + self.stop() + + def stop(self): + logger.info('Server is shuting down ......') + self.exit_flag = True + self.server.stop(0) + logger.info('Server is closed') diff --git a/mishards/service_handler.py b/mishards/service_handler.py new file mode 100644 index 0000000000..ead8d14d88 --- /dev/null +++ b/mishards/service_handler.py @@ -0,0 +1,327 @@ +import logging +from contextlib import contextmanager +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 + +from grpc_utils.grpc_args_parser import GrpcArgsParser as Parser + +logger = logging.getLogger(__name__) + + +class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): + def __init__(self, conn_mgr, *args, **kwargs): + self.conn_mgr = conn_mgr + self.table_meta = {} + + @property + def connection(self): + conn = self.conn_mgr.conn('WOSERVER') + if conn: + conn.on_connect() + return conn.conn + + def CreateTable(self, request, context): + _status, _table_schema = Parser.parse_proto_TableSchema(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + logger.info('CreateTable {}'.format(_table_schema['table_name'])) + + _status = self.connection.create_table(_table_schema) + + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + def HasTable(self, request, 
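# context: the grpc.ServicerContext the gRPC runtime passes to every servicer method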
context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.BoolReply( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + bool_reply=False + ) + + logger.info('HasTable {}'.format(_table_name)) + + _bool = self.connection.has_table(_table_name) + + return milvus_pb2.BoolReply( + status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), + bool_reply=_bool + ) + + def DropTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + logger.info('DropTable {}'.format(_table_name)) + + _status = self.connection.delete_table(_table_name) + + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + def CreateIndex(self, request, context): + _status, unpacks = Parser.parse_proto_IndexParam(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + _table_name, _index = unpacks + + logger.info('CreateIndex {}'.format(_table_name)) + + # TODO: interface create_table incompleted + _status = self.connection.create_index(_table_name, _index) + + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + def Insert(self, request, context): + logger.info('Insert') + # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' + _status, _ids = self.connection.add_vectors(None, None, insert_param=request) + return milvus_pb2.VectorIds( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + vector_id_array=_ids + ) + + def Search(self, request, context): + + try: + table_name = request.table_name + + topk = request.topk + nprobe = request.nprobe + + logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) + + if nprobe > 2048 or nprobe <= 0: + raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) + + table_meta = self.table_meta.get(table_name, None) + if not table_meta: + status, info = self.connection.describe_table(table_name) + if not status.OK(): + raise TableNotFoundException(table_name) + + self.table_meta[table_name] = info + table_meta = info + + start = time.time() + + query_record_array = [] + + for query_record in request.query_record_array: + query_record_array.append(list(query_record.vector_data)) + + query_range_array = [] + for query_range in request.query_range_array: + query_range_array.append( + Range(query_range.start_value, query_range.end_value)) + except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc: + return milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=exc.code, reason=exc.message) + ) + except Exception as e: + return milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)) + ) + + try: + results = workflow.query_vectors(table_name, table_meta, query_record_array, topk, + nprobe, query_range_array) + except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc: + return milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=exc.code, reason=exc.message) + ) + except exceptions.ServiceNotFoundException as exc: + return milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message) + ) + except Exception as e: + logger.error(e) + results = workflow.query_vectors(table_name, table_meta, 
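# last-resort fallback: the error is only logged, then the same query is retried once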
query_record_array, + topk, nprobe, query_range_array) + + now = time.time() + logger.info('SearchVector Ends @{}'.format(now)) + logger.info('SearchVector takes: {}'.format(now - start)) + + topk_result_list = milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success"), + topk_query_result=results + ) + return topk_result_list + + def SearchInFiles(self, request, context): + try: + file_id_array = list(request.file_id_array) + search_param = request.search_param + table_name = search_param.table_name + topk = search_param.topk + nprobe = search_param.nprobe + + query_record_array = [] + + for query_record in search_param.query_record_array: + query_record_array.append(list(query_record)) + + query_range_array = [] + for query_range in search_param.query_range_array: + query_range_array.append("") + except Exception as e: + milvus_pb2.TopKQueryResultList( + status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)), + ) + + res = search_vector_in_files.delay(table_name=table_name, + file_id_array=file_id_array, + query_record_array=query_record_array, + query_range_array=query_range_array, + topk=topk, + nprobe=nprobe) + status, result = res.get(timeout=1) + + if not status.OK(): + raise ThriftException(code=status.code, reason=status.message) + res = TopKQueryResult() + for top_k_query_results in result: + res.query_result_arrays.append([QueryResult(id=qr.id, distance=qr.distance) + for qr in top_k_query_results]) + return res + + def DescribeTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + table_name = milvus_pb2.TableName( + status=status_pb2.Status(error_code=_status.code, reason=_status.message) + ) + return milvus_pb2.TableSchema( + table_name=table_name + ) + + logger.info('DescribeTable {}'.format(_table_name)) + _status, _table = self.connection.describe_table(_table_name) + + if _status.OK(): + _grpc_table_name = milvus_pb2.TableName( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_table.table_name + ) + + return milvus_pb2.TableSchema( + table_name=_grpc_table_name, + index_file_size=_table.index_file_size, + dimension=_table.dimension, + metric_type=_table.metric_type + ) + + return milvus_pb2.TableSchema( + table_name=milvus_pb2.TableName( + status=status_pb2.Status(error_code=_status.code, reason=_status.message) + ) + ) + + def CountTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + status = status_pb2.Status(error_code=_status.code, reason=_status.message) + + return milvus_pb2.TableRowCount( + status=status + ) + + logger.info('CountTable {}'.format(_table_name)) + + _status, _count = self.connection.get_table_row_count(_table_name) + + return milvus_pb2.TableRowCount( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_row_count=_count if isinstance(_count, int) else -1) + + def Cmd(self, request, context): + _status, _cmd = Parser.parse_proto_Command(request) + logger.info('Cmd: {}'.format(_cmd)) + + if not _status.OK(): + return milvus_pb2.StringReply( + status_pb2.Status(error_code=_status.code, reason=_status.message) + ) + + if _cmd == 'version': + _status, _reply = self.connection.server_version() + else: + _status, _reply = self.connection.server_status() + + return milvus_pb2.StringReply( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + 
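# _reply is the raw text returned by server_version() / server_status()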
string_reply=_reply + ) + + def ShowTables(self, request, context): + logger.info('ShowTables') + _status, _results = self.connection.show_tables() + + if not _status.OK(): + _results = [] + + for _result in _results: + yield milvus_pb2.TableName( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_result + ) + + def DeleteByRange(self, request, context): + _status, unpacks = \ + Parser.parse_proto_DeleteByRangeParam(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + _table_name, _start_date, _end_date = unpacks + + logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) + _status = self.connection.delete_vectors_by_range(_table_name, _start_date, _end_date) + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + def PreloadTable(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + logger.info('PreloadTable {}'.format(_table_name)) + _status = self.connection.preload_table(_table_name) + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + def DescribeIndex(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return milvus_pb2.IndexParam( + table_name=milvus_pb2.TableName( + status=status_pb2.Status(error_code=_status.code, reason=_status.message) + ) + ) + + logger.info('DescribeIndex {}'.format(_table_name)) + _status, _index_param = self.connection.describe_index(_table_name) + + _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) + _tablename = milvus_pb2.TableName( + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_table_name) + + return milvus_pb2.IndexParam(table_name=_tablename, index=_index) + + def DropIndex(self, request, context): + _status, _table_name = Parser.parse_proto_TableName(request) + + if not _status.OK(): + return status_pb2.Status(error_code=_status.code, reason=_status.message) + + logger.info('DropIndex {}'.format(_table_name)) + _status = self.connection.drop_index(_table_name) + return status_pb2.Status(error_code=_status.code, reason=_status.message) diff --git a/settings.py b/mishards/settings.py similarity index 90% rename from settings.py rename to mishards/settings.py index 4ad00e66cb..0566cf066f 100644 --- a/settings.py +++ b/mishards/settings.py @@ -22,6 +22,8 @@ config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) +SERVER_PORT = env.int('SERVER_PORT', 19530) +WOSERVER = env.str('WOSERVER') if __name__ == '__main__': import logging diff --git a/utils/__init__.py b/mishards/utils/__init__.py similarity index 100% rename from utils/__init__.py rename to mishards/utils/__init__.py diff --git a/utils/logger_helper.py b/mishards/utils/logger_helper.py similarity index 100% rename from utils/logger_helper.py rename to mishards/utils/logger_helper.py diff --git a/service_handler.py b/service_handler.py deleted file mode 100644 index d5018a54d8..0000000000 --- a/service_handler.py +++ /dev/null @@ -1,11 +0,0 @@ -import logging - -import grpco -from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 - -logger = logging.getLogger(__name__) - - -class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): - def __init__(self, connections, 
*args, **kwargs): - self.connections = self.connections From 86a893cb0462f7822aa1d4da2aef3f478b67db83 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 11:56:00 +0800 Subject: [PATCH 005/196] impl part of search --- mishards/exception_codes.py | 2 + mishards/exceptions.py | 3 + mishards/main.py | 1 + mishards/service_handler.py | 232 +++++++++++++++++++++++------------- mishards/settings.py | 1 + 5 files changed, 157 insertions(+), 82 deletions(-) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index c8cfd81dab..32b29bdfab 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -2,3 +2,5 @@ INVALID_CODE = -1 CONNECT_ERROR_CODE = 10001 CONNECTTION_NOT_FOUND_CODE = 10002 + +TABLE_NOT_FOUND_CODE = 20001 diff --git a/mishards/exceptions.py b/mishards/exceptions.py index a25fb2c4ae..1445d18769 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -11,3 +11,6 @@ class ConnectionConnectError(BaseException): class ConnectionNotFoundError(BaseException): code = codes.CONNECTTION_NOT_FOUND_CODE + +class TableNotFoundError(BaseException): + code = codes.TABLE_NOT_FOUND_CODE diff --git a/mishards/main.py b/mishards/main.py index 0185e6ac1d..2ba3f14697 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -7,6 +7,7 @@ from mishards import connect_mgr, grpc_server as server def main(): connect_mgr.register('WOSERVER', settings.WOSERVER) + connect_mgr.register('TEST', 'tcp://127.0.0.1:19530') server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/service_handler.py b/mishards/service_handler.py index ead8d14d88..89ae2cd36c 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -1,13 +1,22 @@ import logging +import time +import datetime from contextlib import contextmanager -from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from milvus.grpc_gen.milvus_pb2 import TopKQueryResult +from milvus.client import types + +import settings from grpc_utils.grpc_args_parser import GrpcArgsParser as Parser logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): + MAX_NPROBE = 2048 def __init__(self, conn_mgr, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} @@ -19,6 +28,99 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn.on_connect() return conn.conn + def query_conn(self, name): + conn = self.conn_mgr.conn(name) + conn and conn.on_connect() + return conn.conn + + def _format_date(self, start, end): + return ((start.year-1900)*10000 + (start.month-1)*100 + start.day + , (end.year-1900)*10000 + (end.month-1)*100 + end.day) + + def _range_to_date(self, range_obj): + try: + start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') + end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') + assert start >= end + except (ValueError, AssertionError): + raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( + range_obj.start_date, range_obj.end_date + )) + + return self._format_date(start, end) + + def _get_routing_file_ids(self, table_id, range_array): + return { + 'TEST': { + 'table_id': table_id, + 'file_ids': [123] + } + } + + def _do_merge(self, files_n_topk_results, topk, reverse=False): + if not files_n_topk_results: + return [] + + request_results = defaultdict(list) + + calc_time = time.time() + for files_collection in 
files_n_topk_results: + for request_pos, each_request_results in enumerate(files_collection.topk_query_result): + request_results[request_pos].extend(each_request_results.query_result_arrays) + request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, + reverse=reverse)[:topk] + + calc_time = time.time() - calc_time + logger.info('Merge takes {}'.format(calc_time)) + + results = sorted(request_results.items()) + topk_query_result = [] + + for result in results: + query_result = TopKQueryResult(query_result_arrays=result[1]) + topk_query_result.append(query_result) + + return topk_query_result + + def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): + range_array = [self._range_to_date(r) for r in range_array] if range_array else None + routing = self._get_routing_file_ids(table_id, range_array) + logger.debug(routing) + + rs = [] + all_topk_results = [] + + workers = settings.SEARCH_WORKER_SIZE + + def search(addr, query_params, vectors, topk, nprobe, **kwargs): + logger.info('Send Search Request: addr={};params={};nq={};topk={};nprobe={}'.format( + addr, query_params, len(vectors), topk, nprobe + )) + + conn = self.query_conn(addr) + start = time.time() + ret = conn.search_vectors_in_files(table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) + end = time.time() + logger.info('search_vectors_in_files takes: {}'.format(end - start)) + + all_topk_results.append(ret) + + with ThreadPoolExecutor(max_workers=workers) as pool: + for addr, params in routing.items(): + res = pool.submit(search, addr, params, vectors, topk, nprobe) + rs.append(res) + + for res in rs: + res.result() + + reverse = table_meta.metric_type == types.MetricType.L2 + return self._do_merge(all_topk_results, topk, reverse=reverse) + def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -87,64 +189,64 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def Search(self, request, context): - try: - table_name = request.table_name + table_name = request.table_name - topk = request.topk - nprobe = request.nprobe + topk = request.topk + nprobe = request.nprobe - logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) + logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) - if nprobe > 2048 or nprobe <= 0: - raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) + if nprobe > self.MAX_NPROBE or nprobe <= 0: + raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) - table_meta = self.table_meta.get(table_name, None) - if not table_meta: - status, info = self.connection.describe_table(table_name) - if not status.OK(): - raise TableNotFoundException(table_name) + table_meta = self.table_meta.get(table_name, None) + if not table_meta: + status, info = self.connection.describe_table(table_name) + if not status.OK(): + raise exceptions.TableNotFoundError(table_name) - self.table_meta[table_name] = info - table_meta = info + self.table_meta[table_name] = info + table_meta = info - start = time.time() + start = time.time() - query_record_array = [] + query_record_array = [] - for query_record in request.query_record_array: - query_record_array.append(list(query_record.vector_data)) + for query_record in request.query_record_array: + query_record_array.append(list(query_record.vector_data)) - query_range_array = [] - for 
query_range in request.query_range_array: - query_range_array.append( - Range(query_range.start_value, query_range.end_value)) - except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc: - return milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=exc.code, reason=exc.message) - ) - except Exception as e: - return milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)) - ) + query_range_array = [] + for query_range in request.query_range_array: + query_range_array.append( + Range(query_range.start_value, query_range.end_value)) + # except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc: + # return milvus_pb2.TopKQueryResultList( + # status=status_pb2.Status(error_code=exc.code, reason=exc.message) + # ) + # except Exception as e: + # return milvus_pb2.TopKQueryResultList( + # status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)) + # ) - try: - results = workflow.query_vectors(table_name, table_meta, query_record_array, topk, - nprobe, query_range_array) - except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc: - return milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=exc.code, reason=exc.message) - ) - except exceptions.ServiceNotFoundException as exc: - return milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message) - ) - except Exception as e: - logger.error(e) - results = workflow.query_vectors(table_name, table_meta, query_record_array, - topk, nprobe, query_range_array) + results = self._do_query(table_name, table_meta, query_record_array, topk, + nprobe, query_range_array) + # try: + # results = workflow.query_vectors(table_name, table_meta, query_record_array, topk, + # nprobe, query_range_array) + # except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc: + # return milvus_pb2.TopKQueryResultList( + # status=status_pb2.Status(error_code=exc.code, reason=exc.message) + # ) + # except exceptions.ServiceNotFoundException as exc: + # return milvus_pb2.TopKQueryResultList( + # status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message) + # ) + # except Exception as e: + # logger.error(e) + # results = workflow.query_vectors(table_name, table_meta, query_record_array, + # topk, nprobe, query_range_array) now = time.time() - logger.info('SearchVector Ends @{}'.format(now)) logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list = milvus_pb2.TopKQueryResultList( @@ -154,41 +256,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return topk_result_list def SearchInFiles(self, request, context): - try: - file_id_array = list(request.file_id_array) - search_param = request.search_param - table_name = search_param.table_name - topk = search_param.topk - nprobe = search_param.nprobe - - query_record_array = [] - - for query_record in search_param.query_record_array: - query_record_array.append(list(query_record)) - - query_range_array = [] - for query_range in search_param.query_range_array: - query_range_array.append("") - except Exception as e: - milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)), - ) - - res = search_vector_in_files.delay(table_name=table_name, - file_id_array=file_id_array, - query_record_array=query_record_array, - query_range_array=query_range_array, - topk=topk, - nprobe=nprobe) - status, 
result = res.get(timeout=1) - - if not status.OK(): - raise ThriftException(code=status.code, reason=status.message) - res = TopKQueryResult() - for top_k_query_results in result: - res.query_result_arrays.append([QueryResult(id=qr.id, distance=qr.distance) - for qr in top_k_query_results]) - return res + raise NotImplemented() def DescribeTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) diff --git a/mishards/settings.py b/mishards/settings.py index 0566cf066f..4d87e69fe3 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -21,6 +21,7 @@ config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) +SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) SERVER_PORT = env.int('SERVER_PORT', 19530) WOSERVER = env.str('WOSERVER') From deb4a5fb62ff540eb06003d9b2940d09b8aeeb16 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 14:50:36 +0800 Subject: [PATCH 006/196] update for service discovery --- mishards/__init__.py | 8 ++ mishards/connections.py | 9 +- mishards/main.py | 16 ++- mishards/server.py | 2 +- mishards/service_founder.py | 273 ++++++++++++++++++++++++++++++++++++ mishards/service_handler.py | 7 +- mishards/settings.py | 11 +- 7 files changed, 315 insertions(+), 11 deletions(-) create mode 100644 mishards/service_founder.py diff --git a/mishards/__init__.py b/mishards/__init__.py index 700dd4238c..b3a14cf7e3 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -2,5 +2,13 @@ import settings from connections import ConnectionMgr connect_mgr = ConnectionMgr() +from service_founder import ServiceFounder +discover = ServiceFounder(namespace=settings.SD_NAMESPACE, + conn_mgr=connect_mgr, + pod_patt=settings.SD_ROSERVER_POD_PATT, + label_selector=settings.SD_LABEL_SELECTOR, + in_cluster=settings.SD_IN_CLUSTER, + poll_interval=settings.SD_POLL_INTERVAL) + from server import Server grpc_server = Server(conn_mgr=connect_mgr) diff --git a/mishards/connections.py b/mishards/connections.py index 06d5f3ff16..82dd082eac 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -29,7 +29,7 @@ class Connection: self.conn.connect(uri=self.uri) except Exception as e: if not self.error_handlers: - raise exceptions.ConnectionConnectError() + raise exceptions.ConnectionConnectError(e) for handler in self.error_handlers: handler(e) @@ -77,6 +77,10 @@ class ConnectionMgr: self.metas = {} self.conns = {} + @property + def conn_names(self): + return set(self.metas.keys()) - set(['WOSERVER']) + def conn(self, name, throw=False): c = self.conns.get(name, None) if not c: @@ -116,7 +120,8 @@ class ConnectionMgr: return self.on_diff_meta(name, url) def on_same_meta(self, name, url): - logger.warn('Register same meta: {}:{}'.format(name, url)) + # logger.warn('Register same meta: {}:{}'.format(name, url)) + pass def on_diff_meta(self, name, url): logger.warn('Received {} with diff url={}'.format(name, url)) diff --git a/mishards/main.py b/mishards/main.py index 2ba3f14697..0526f87ff8 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -3,13 +3,19 @@ import os sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import settings -from mishards import connect_mgr, grpc_server as server +from mishards import (connect_mgr, + discover, + grpc_server as server) def main(): - connect_mgr.register('WOSERVER', settings.WOSERVER) - connect_mgr.register('TEST', 'tcp://127.0.0.1:19530') - server.run(port=settings.SERVER_PORT) - return 0 + try: + 
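# discover.start() returns only after the first startup events have been handled,
# so read-only pods are registered before the gRPC server begins serving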
discover.start() + connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) + server.run(port=settings.SERVER_PORT) + return 0 + except Exception as e: + logger.error(e) + return 1 if __name__ == '__main__': sys.exit(main()) diff --git a/mishards/server.py b/mishards/server.py index 59ea7db46b..d2f88cf592 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -43,5 +43,5 @@ class Server: def stop(self): logger.info('Server is shuting down ......') self.exit_flag = True - self.server.stop(0) + self.server_impl.stop(0) logger.info('Server is closed') diff --git a/mishards/service_founder.py b/mishards/service_founder.py new file mode 100644 index 0000000000..7fc47639e7 --- /dev/null +++ b/mishards/service_founder.py @@ -0,0 +1,273 @@ +import os, sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import re +import logging +import time +import copy +import threading +import queue +from functools import wraps +from kubernetes import client, config, watch + +from mishards.utils import singleton + +logger = logging.getLogger(__name__) + +incluster_namespace_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' + + +class K8SMixin: + def __init__(self, namespace, in_cluster=False, **kwargs): + self.namespace = namespace + self.in_cluster = in_cluster + self.kwargs = kwargs + self.v1 = kwargs.get('v1', None) + if not self.namespace: + self.namespace = open(incluster_namespace_path).read() + + if not self.v1: + config.load_incluster_config() if self.in_cluster else config.load_kube_config() + self.v1 = client.CoreV1Api() + + +class K8SServiceDiscover(threading.Thread, K8SMixin): + def __init__(self, message_queue, namespace, label_selector, in_cluster=False, **kwargs): + K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs) + threading.Thread.__init__(self) + self.queue = message_queue + self.terminate = False + self.label_selector = label_selector + self.poll_interval = kwargs.get('poll_interval', 5) + + def run(self): + while not self.terminate: + try: + pods = self.v1.list_namespaced_pod(namespace=self.namespace, label_selector=self.label_selector) + event_message = { + 'eType': 'PodHeartBeat', + 'events': [] + } + for item in pods.items: + pod = self.v1.read_namespaced_pod(name=item.metadata.name, namespace=self.namespace) + name = pod.metadata.name + ip = pod.status.pod_ip + phase = pod.status.phase + reason = pod.status.reason + message = pod.status.message + ready = True if phase == 'Running' else False + + pod_event = dict( + pod=name, + ip=ip, + ready=ready, + reason=reason, + message=message + ) + + event_message['events'].append(pod_event) + + self.queue.put(event_message) + + + except Exception as exc: + logger.error(exc) + + time.sleep(self.poll_interval) + + def stop(self): + self.terminate = True + + +class K8SEventListener(threading.Thread, K8SMixin): + def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): + K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs) + threading.Thread.__init__(self) + self.queue = message_queue + self.terminate = False + self.at_start_up = True + self._stop_event = threading.Event() + + def stop(self): + self.terminate = True + self._stop_event.set() + + def run(self): + resource_version = '' + w = watch.Watch() + for event in w.stream(self.v1.list_namespaced_event, namespace=self.namespace, + field_selector='involvedObject.kind=Pod'): + if self.terminate: + 
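# the watch stream yields only when an event arrives, so this flag is checked
# lazily; a timeout_seconds argument on w.stream() would bound shutdown latency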
break + + resource_version = int(event['object'].metadata.resource_version) + + info = dict( + eType='WatchEvent', + pod=event['object'].involved_object.name, + reason=event['object'].reason, + message=event['object'].message, + start_up=self.at_start_up, + ) + self.at_start_up = False + # logger.info('Received event: {}'.format(info)) + self.queue.put(info) + + +class EventHandler(threading.Thread): + def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs): + threading.Thread.__init__(self) + self.mgr = mgr + self.queue = message_queue + self.kwargs = kwargs + self.terminate = False + self.pod_patt = re.compile(pod_patt) + self.namespace = namespace + + def stop(self): + self.terminate = True + + def on_drop(self, event, **kwargs): + pass + + def on_pod_started(self, event, **kwargs): + try_cnt = 3 + pod = None + while try_cnt > 0: + try_cnt -= 1 + try: + pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], namespace=self.namespace) + if not pod.status.pod_ip: + time.sleep(0.5) + continue + break + except client.rest.ApiException as exc: + time.sleep(0.5) + + if try_cnt <= 0 and not pod: + if not event['start_up']: + logger.error('Pod {} is started but cannot read pod'.format(event['pod'])) + return + elif try_cnt <= 0 and not pod.status.pod_ip: + logger.warn('NoPodIPFoundError') + return + + logger.info('Register POD {} with IP {}'.format(pod.metadata.name, pod.status.pod_ip)) + self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) + + def on_pod_killing(self, event, **kwargs): + logger.info('Unregister POD {}'.format(event['pod'])) + self.mgr.delete_pod(name=event['pod']) + + def on_pod_heartbeat(self, event, **kwargs): + names = self.mgr.conn_mgr.conn_names + + running_names = set() + for each_event in event['events']: + if each_event['ready']: + self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip']) + running_names.add(each_event['pod']) + else: + self.mgr.delete_pod(name=each_event['pod']) + + to_delete = names - running_names + for name in to_delete: + self.mgr.delete_pod(name) + + logger.info(self.mgr.conn_mgr.conn_names) + + def handle_event(self, event): + if event['eType'] == 'PodHeartBeat': + return self.on_pod_heartbeat(event) + + if not event or (event['reason'] not in ('Started', 'Killing')): + return self.on_drop(event) + + if not re.match(self.pod_patt, event['pod']): + return self.on_drop(event) + + logger.info('Handling event: {}'.format(event)) + + if event['reason'] == 'Started': + return self.on_pod_started(event) + + return self.on_pod_killing(event) + + def run(self): + while not self.terminate: + try: + event = self.queue.get(timeout=1) + self.handle_event(event) + except queue.Empty: + continue + +@singleton +class ServiceFounder(object): + def __init__(self, conn_mgr, namespace, pod_patt, label_selector, in_cluster=False, **kwargs): + self.namespace = namespace + self.kwargs = kwargs + self.queue = queue.Queue() + self.in_cluster = in_cluster + + self.conn_mgr = conn_mgr + + if not self.namespace: + self.namespace = open(incluster_namespace_path).read() + + config.load_incluster_config() if self.in_cluster else config.load_kube_config() + self.v1 = client.CoreV1Api() + + self.listener = K8SEventListener( + message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs + ) + + self.pod_heartbeater = K8SServiceDiscover( + message_queue=self.queue, + namespace=namespace, + label_selector=label_selector, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs + ) + + self.event_handler = 
EventHandler(mgr=self, + message_queue=self.queue, + namespace=self.namespace, + pod_patt=pod_patt, **kwargs) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) + + def start(self): + self.listener.daemon = True + self.listener.start() + self.event_handler.start() + while self.listener.at_start_up: + time.sleep(1) + + self.pod_heartbeater.start() + + def stop(self): + self.listener.stop() + self.pod_heartbeater.stop() + self.event_handler.stop() + + +if __name__ == '__main__': + from mishards import connect_mgr + logging.basicConfig(level=logging.INFO) + t = ServiceFounder(namespace='xp', conn_mgr=connect_mgr, pod_patt=".*-ro-servers-.*", label_selector='tier=ro-servers', in_cluster=False) + t.start() + cnt = 2 + while cnt > 0: + time.sleep(2) + cnt -= 1 + t.stop() diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 89ae2cd36c..516359f27c 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -11,6 +11,7 @@ from milvus.client import types import settings from grpc_utils.grpc_args_parser import GrpcArgsParser as Parser +import exceptions logger = logging.getLogger(__name__) @@ -30,7 +31,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def query_conn(self, name): conn = self.conn_mgr.conn(name) - conn and conn.on_connect() + if not conn: + raise exceptions.ConnectionNotFoundError(name) + conn.on_connect() return conn.conn def _format_date(self, start, end): @@ -51,7 +54,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _get_routing_file_ids(self, table_id, range_array): return { - 'TEST': { + 'milvus-ro-servers-0': { 'table_id': table_id, 'file_ids': [123] } diff --git a/mishards/settings.py b/mishards/settings.py index 4d87e69fe3..c4466da6ec 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -7,7 +7,6 @@ env = Env() env.read_env() DEBUG = env.bool('DEBUG', False) -TESTING = env.bool('TESTING', False) METADATA_URI = env.str('METADATA_URI', '') @@ -26,6 +25,16 @@ SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) SERVER_PORT = env.int('SERVER_PORT', 19530) WOSERVER = env.str('WOSERVER') +SD_NAMESPACE = env.str('SD_NAMESPACE', '') +SD_IN_CLUSTER = env.bool('SD_IN_CLUSTER', False) +SD_POLL_INTERVAL = env.int('SD_POLL_INTERVAL', 5) +SD_ROSERVER_POD_PATT = env.str('SD_ROSERVER_POD_PATT', '') +SD_LABEL_SELECTOR = env.str('SD_LABEL_SELECTOR', '') + +TESTING = env.bool('TESTING', False) +TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') + + if __name__ == '__main__': import logging logger = logging.getLogger(__name__) From 099317edeeea5db14be23709736a8a13ffe4933a Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 15:43:42 +0800 Subject: [PATCH 007/196] add models --- manager.py | 14 +++++++ mishards/__init__.py | 13 +++++-- mishards/connections.py | 5 +-- mishards/db_base.py | 27 +++++++++++++ mishards/exceptions.py | 2 +- mishards/main.py | 20 ++++------ mishards/models.py | 75 +++++++++++++++++++++++++++++++++++++ mishards/server.py | 4 +- mishards/service_handler.py | 6 +-- mishards/settings.py | 4 +- 10 files changed, 144 insertions(+), 26 deletions(-) create mode 100644 manager.py create mode 100644 mishards/db_base.py create mode 100644 mishards/models.py diff --git a/manager.py b/manager.py new file mode 100644 index 0000000000..0a2acad26f --- /dev/null +++ b/manager.py @@ -0,0 +1,14 @@ +import fire +from mishards import db + +class DBHandler: + 
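# fire.Fire(DBHandler) at the bottom maps each classmethod onto a CLI command,
# e.g. `python manager.py create_all` or `python manager.py drop_all`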
@classmethod + def create_all(cls): + db.create_all() + + @classmethod + def drop_all(cls): + db.drop_all() + +if __name__ == '__main__': + fire.Fire(DBHandler) diff --git a/mishards/__init__.py b/mishards/__init__.py index b3a14cf7e3..c799e42fa4 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -1,8 +1,13 @@ -import settings -from connections import ConnectionMgr +from mishards import settings + +from mishards.db_base import DB +db = DB() +db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI) + +from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() -from service_founder import ServiceFounder +from mishards.service_founder import ServiceFounder discover = ServiceFounder(namespace=settings.SD_NAMESPACE, conn_mgr=connect_mgr, pod_patt=settings.SD_ROSERVER_POD_PATT, @@ -10,5 +15,5 @@ discover = ServiceFounder(namespace=settings.SD_NAMESPACE, in_cluster=settings.SD_IN_CLUSTER, poll_interval=settings.SD_POLL_INTERVAL) -from server import Server +from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr) diff --git a/mishards/connections.py b/mishards/connections.py index 82dd082eac..9201ea2b08 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -4,9 +4,8 @@ from functools import wraps from contextlib import contextmanager from milvus import Milvus -import settings -import exceptions -from utils import singleton +from mishards import (settings, exceptions) +from mishards.utils import singleton logger = logging.getLogger(__name__) diff --git a/mishards/db_base.py b/mishards/db_base.py new file mode 100644 index 0000000000..702c9e57e9 --- /dev/null +++ b/mishards/db_base.py @@ -0,0 +1,27 @@ +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker, scoped_session + +class DB: + Model = declarative_base() + def __init__(self, uri=None): + uri and self.init_db(uri) + + def init_db(self, uri): + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + pool_pre_ping=True, + max_overflow=0) + self.uri = uri + session = sessionmaker() + session.configure(bind=self.engine) + self.db_session = session() + + @property + def Session(self): + return self.db_session + + def drop_all(self): + self.Model.metadata.drop_all(self.engine) + + def create_all(self): + self.Model.metadata.create_all(self.engine) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 1445d18769..0f89ecb52d 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -1,4 +1,4 @@ -import exception_codes as codes +import mishards.exception_codes as codes class BaseException(Exception): code = codes.INVALID_CODE diff --git a/mishards/main.py b/mishards/main.py index 0526f87ff8..5d96d8b499 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,21 +1,17 @@ -import sys -import os +import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -import settings -from mishards import (connect_mgr, +from mishards import ( + settings, + db, connect_mgr, discover, grpc_server as server) def main(): - try: - discover.start() - connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) - server.run(port=settings.SERVER_PORT) - return 0 - except Exception as e: - logger.error(e) - return 1 + discover.start() + connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) + server.run(port=settings.SERVER_PORT) + return 0 if __name__ == 
'__main__': sys.exit(main()) diff --git a/mishards/models.py b/mishards/models.py new file mode 100644 index 0000000000..c699f490dd --- /dev/null +++ b/mishards/models.py @@ -0,0 +1,75 @@ +import logging +from sqlalchemy import (Integer, Boolean, Text, + String, BigInteger, func, and_, or_, + Column) +from sqlalchemy.orm import relationship, backref + +from mishards import db + +logger = logging.getLogger(__name__) + +class TableFiles(db.Model): + FILE_TYPE_NEW = 0 + FILE_TYPE_RAW = 1 + FILE_TYPE_TO_INDEX = 2 + FILE_TYPE_INDEX = 3 + FILE_TYPE_TO_DELETE = 4 + FILE_TYPE_NEW_MERGE = 5 + FILE_TYPE_NEW_INDEX = 6 + FILE_TYPE_BACKUP = 7 + + __tablename__ = 'TableFiles' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50)) + engine_type = Column(Integer) + file_id = Column(String(50)) + file_type = Column(Integer) + file_size = Column(Integer, default=0) + row_count = Column(Integer, default=0) + updated_time = Column(BigInteger) + created_on = Column(BigInteger) + date = Column(Integer) + + table = relationship( + 'Table', + primaryjoin='and_(foreign(TableFile.table_id) == Table.table_id)', + backref=backref('files', uselist=True, lazy='dynamic') + ) + + +class Tables(db.Model): + TO_DELETE = 1 + NORMAL = 0 + + __tablename__ = 'Tables' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50), unique=True) + state = Column(Integer) + dimension = Column(Integer) + created_on = Column(Integer) + flag = Column(Integer, default=0) + index_file_size = Column(Integer) + engine_type = Column(Integer) + nlist = Column(Integer) + metric_type = Column(Integer) + + def files_to_search(self, date_range=None): + cond = or_( + TableFile.file_type==TableFile.FILE_TYPE_RAW, + TableFile.file_type==TableFile.FILE_TYPE_TO_INDEX, + TableFile.file_type==TableFile.FILE_TYPE_INDEX, + ) + if date_range: + cond = and_( + cond, + or_( + and_(TableFile.date>=d[0], TableFile.date<d[1]) for d in date_range Date: Wed, 18 Sep 2019 16:59:04 +0800 Subject: [PATCH 008/196] update for models --- manager.py | 13 ++++ mishards/__init__.py | 2 +- mishards/db_base.py | 11 ++- mishards/factories.py | 49 ++++++++++++ mishards/hash_ring.py | 150 ++++++++++++++++++++++++++++++++++++ mishards/models.py | 12 +-- mishards/service_founder.py | 4 +- mishards/service_handler.py | 39 ++++++++-- mishards/settings.py | 1 + 9 files changed, 262 insertions(+), 19 deletions(-) create mode 100644 mishards/factories.py create mode 100644 mishards/hash_ring.py diff --git a/manager.py b/manager.py index 0a2acad26f..31f5894d2d 100644 --- a/manager.py +++ b/manager.py @@ -1,5 +1,6 @@ import fire from mishards import db +from sqlalchemy import and_ class DBHandler: @classmethod @@ -10,5 +11,17 @@ class DBHandler: def drop_all(cls): db.drop_all() + @classmethod + def fun(cls, tid): + from mishards.factories import TablesFactory, TableFilesFactory, Tables + f = db.Session.query(Tables).filter(and_( + Tables.table_id==tid, + Tables.state!=Tables.TO_DELETE) + ).first() + print(f) + + # f1 = TableFilesFactory() + + if __name__ == '__main__': fire.Fire(DBHandler) diff --git a/mishards/__init__.py b/mishards/__init__.py index c799e42fa4..a792cd5ce9 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -2,7 +2,7 @@ from mishards import settings from mishards.db_base import DB db = DB() -db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI) +db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() diff --git
a/mishards/db_base.py b/mishards/db_base.py index 702c9e57e9..5ad1c394d7 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -1,15 +1,20 @@ +import logging from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session +logger = logging.getLogger(__name__) + class DB: Model = declarative_base() - def __init__(self, uri=None): - uri and self.init_db(uri) + def __init__(self, uri=None, echo=False): + self.echo = echo + uri and self.init_db(uri, echo) - def init_db(self, uri): + def init_db(self, uri, echo=False): self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, pool_pre_ping=True, + echo=echo, max_overflow=0) self.uri = uri session = sessionmaker() diff --git a/mishards/factories.py b/mishards/factories.py new file mode 100644 index 0000000000..5bd059654a --- /dev/null +++ b/mishards/factories.py @@ -0,0 +1,49 @@ +import time +import datetime +import random +import factory +from factory.alchemy import SQLAlchemyModelFactory +from faker import Faker +from faker.providers import BaseProvider + +from mishards import db +from mishards.models import Tables, TableFiles + +class FakerProvider(BaseProvider): + def this_date(self): + t = datetime.datetime.today() + return (t.year - 1900) * 10000 + (t.month-1)*100 + t.day + +factory.Faker.add_provider(FakerProvider) + +class TablesFactory(SQLAlchemyModelFactory): + class Meta: + model = Tables + sqlalchemy_session = db.Session + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table_id = factory.Faker('uuid4') + state = factory.Faker('random_element', elements=(0,1,2,3)) + dimension = factory.Faker('random_element', elements=(256,512)) + created_on = int(time.time()) + index_file_size = 0 + engine_type = factory.Faker('random_element', elements=(0,1,2,3)) + metric_type = factory.Faker('random_element', elements=(0,1)) + nlist = 16384 + +class TableFilesFactory(SQLAlchemyModelFactory): + class Meta: + model = TableFiles + sqlalchemy_session = db.Session + sqlalchemy_session_persistence = 'commit' + + id = factory.Faker('random_number', digits=16, fix_len=True) + table = factory.SubFactory(TablesFactory) + engine_type = factory.Faker('random_element', elements=(0,1,2,3)) + file_id = factory.Faker('uuid4') + file_type = factory.Faker('random_element', elements=(0,1,2,3,4)) + file_size = factory.Faker('random_number') + updated_time = int(time.time()) + created_on = int(time.time()) + date = factory.Faker('this_date') diff --git a/mishards/hash_ring.py b/mishards/hash_ring.py new file mode 100644 index 0000000000..bfec108c5c --- /dev/null +++ b/mishards/hash_ring.py @@ -0,0 +1,150 @@ +import math +import sys +from bisect import bisect + +if sys.version_info >= (2, 5): + import hashlib + md5_constructor = hashlib.md5 +else: + import md5 + md5_constructor = md5.new + +class HashRing(object): + + def __init__(self, nodes=None, weights=None): + """`nodes` is a list of objects that have a proper __str__ representation. + `weights` is dictionary that sets weights to the nodes. The default + weight is that all nodes are equal. + """ + self.ring = dict() + self._sorted_keys = [] + + self.nodes = nodes + + if not weights: + weights = {} + self.weights = weights + + self._generate_circle() + + def _generate_circle(self): + """Generates the circle. 
+ """ + total_weight = 0 + for node in self.nodes: + total_weight += self.weights.get(node, 1) + + for node in self.nodes: + weight = 1 + + if node in self.weights: + weight = self.weights.get(node) + + factor = math.floor((40*len(self.nodes)*weight) / total_weight); + + for j in range(0, int(factor)): + b_key = self._hash_digest( '%s-%s' % (node, j) ) + + for i in range(0, 3): + key = self._hash_val(b_key, lambda x: x+i*4) + self.ring[key] = node + self._sorted_keys.append(key) + + self._sorted_keys.sort() + + def get_node(self, string_key): + """Given a string key a corresponding node in the hash ring is returned. + + If the hash ring is empty, `None` is returned. + """ + pos = self.get_node_pos(string_key) + if pos is None: + return None + return self.ring[ self._sorted_keys[pos] ] + + def get_node_pos(self, string_key): + """Given a string key a corresponding node in the hash ring is returned + along with it's position in the ring. + + If the hash ring is empty, (`None`, `None`) is returned. + """ + if not self.ring: + return None + + key = self.gen_key(string_key) + + nodes = self._sorted_keys + pos = bisect(nodes, key) + + if pos == len(nodes): + return 0 + else: + return pos + + def iterate_nodes(self, string_key, distinct=True): + """Given a string key it returns the nodes as a generator that can hold the key. + + The generator iterates one time through the ring + starting at the correct position. + + if `distinct` is set, then the nodes returned will be unique, + i.e. no virtual copies will be returned. + """ + if not self.ring: + yield None, None + + returned_values = set() + def distinct_filter(value): + if str(value) not in returned_values: + returned_values.add(str(value)) + return value + + pos = self.get_node_pos(string_key) + for key in self._sorted_keys[pos:]: + val = distinct_filter(self.ring[key]) + if val: + yield val + + for i, key in enumerate(self._sorted_keys): + if i < pos: + val = distinct_filter(self.ring[key]) + if val: + yield val + + def gen_key(self, key): + """Given a string key it returns a long value, + this long value represents a place on the hash ring. + + md5 is currently used because it mixes well. 
+ """ + b_key = self._hash_digest(key) + return self._hash_val(b_key, lambda x: x) + + def _hash_val(self, b_key, entry_fn): + return (( b_key[entry_fn(3)] << 24) + |(b_key[entry_fn(2)] << 16) + |(b_key[entry_fn(1)] << 8) + | b_key[entry_fn(0)] ) + + def _hash_digest(self, key): + m = md5_constructor() + key = key.encode() + m.update(key) + return m.digest() + +if __name__ == '__main__': + from collections import defaultdict + servers = ['192.168.0.246:11212', + '192.168.0.247:11212', + '192.168.0.248:11212', + '192.168.0.249:11212'] + + ring = HashRing(servers) + keys = ['{}'.format(i) for i in range(100)] + mapped = defaultdict(list) + for k in keys: + server = ring.get_node(k) + mapped[server].append(k) + + for k,v in mapped.items(): + print(k, v) diff --git a/mishards/models.py b/mishards/models.py index c699f490dd..0f7bb603ae 100644 --- a/mishards/models.py +++ b/mishards/models.py @@ -32,8 +32,8 @@ class TableFiles(db.Model): date = Column(Integer) table = relationship( - 'Table', - primaryjoin='and_(foreign(TableFile.table_id) == Table.table_id)', + 'Tables', + primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', backref=backref('files', uselist=True, lazy='dynamic') ) @@ -57,15 +57,15 @@ class Tables(db.Model): def files_to_search(self, date_range=None): cond = or_( - TableFile.file_type==TableFile.FILE_TYPE_RAW, - TableFile.file_type==TableFile.FILE_TYPE_TO_INDEX, - TableFile.file_type==TableFile.FILE_TYPE_INDEX, + TableFiles.file_type==TableFiles.FILE_TYPE_RAW, + TableFiles.file_type==TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type==TableFiles.FILE_TYPE_INDEX, ) if date_range: cond = and_( cond, or_( - and_(TableFile.date>=d[0], TableFile.date=d[0], TableFiles.date Date: Wed, 18 Sep 2019 17:09:03 +0800 Subject: [PATCH 009/196] fix session bug --- mishards/db_base.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mishards/db_base.py b/mishards/db_base.py index 5ad1c394d7..ffbe29f94f 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -17,13 +17,12 @@ class DB: echo=echo, max_overflow=0) self.uri = uri - session = sessionmaker() - session.configure(bind=self.engine) - self.db_session = session() + self.session = sessionmaker() + self.session.configure(bind=self.engine) @property def Session(self): - return self.db_session + return self.session() def drop_all(self): self.Model.metadata.drop_all(self.engine) From f22204878a1b7fefda9cb258ce4002c01100a86f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 17:09:38 +0800 Subject: [PATCH 010/196] fix session bug --- mishards/service_handler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 7dd4380d97..eb2951be5e 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -60,7 +60,6 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): Tables.table_id==table_id, Tables.state!=Tables.TO_DELETE )).first() - logger.error(table) if not table: raise exceptions.TableNotFoundError(table_id) From 0ad5c32c46f29fd5486d02e30f74cc06f17c4eb6 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 18:00:30 +0800 Subject: [PATCH 011/196] update requirements.txt --- requirements.txt | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..8cedabdf7b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,33 @@ +environs==4.2.0 +factory-boy==2.12.0 
+Faker==1.0.7 +fire==0.1.3 +google-auth==1.6.3 +grpcio==1.22.0 +grpcio-tools==1.22.0 +kubernetes==10.0.1 +MarkupSafe==1.1.1 +marshmallow==2.19.5 +pymysql==0.9.3 +protobuf==3.9.1 +py==1.8.0 +pyasn1==0.4.7 +pyasn1-modules==0.2.6 +pylint==2.3.1 +#pymilvus-test==0.2.15 +pymilvus==0.2.0 +pyparsing==2.4.0 +pytest==4.6.3 +pytest-level==0.1.1 +pytest-print==0.1.2 +pytest-repeat==0.8.0 +pytest-timeout==1.3.3 +python-dateutil==2.8.0 +python-dotenv==0.10.3 +pytz==2019.1 +requests==2.22.0 +requests-oauthlib==1.2.0 +rsa==4.0 +six==1.12.0 +SQLAlchemy==1.3.5 +urllib3==1.25.3 From c042d2f3234038e01a00e7bc0631b2e653387642 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 18:16:51 +0800 Subject: [PATCH 012/196] add dockerfile --- Dockerfile | 10 ++++++++++ build.sh | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 Dockerfile create mode 100755 build.sh diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..594640619e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,10 @@ +FROM python:3.6 +RUN apt update && apt install -y \ + less \ + telnet +RUN mkdir /source +WORKDIR /source +ADD ./requirements.txt ./ +RUN pip install -r requirements.txt +COPY . . +CMD python mishards/main.py diff --git a/build.sh b/build.sh new file mode 100755 index 0000000000..2b3c89bbf9 --- /dev/null +++ b/build.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +BOLD=`tput bold` +NORMAL=`tput sgr0` +YELLOW='\033[1;33m' +ENDC='\033[0m' + +function build_image() { + dockerfile=$1 + remote_registry=$2 + tagged=$2 + buildcmd="docker build -t ${tagged} -f ${dockerfile} ." + echo -e "${BOLD}$buildcmd${NORMAL}" + $buildcmd + pushcmd="docker push ${remote_registry}" + echo -e "${BOLD}$pushcmd${NORMAL}" + $pushcmd + echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" +} + +case "$1" in + +all) + version="" + [[ ! -z $2 ]] && version=":${2}" + build_image "Dockerfile" "registry.zilliz.com/milvus/mishards${version}" "registry.zilliz.com/milvus/mishards" + ;; +*) + echo "Usage: [option...] 
{base | apps}" + echo "all, Usage: build.sh all [tagname|] => registry.zilliz.com/milvus/mishards:\${tagname}" + ;; +esac From dd59127e9722fcdc9d4b19f17358fb65a73691d4 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 18:17:51 +0800 Subject: [PATCH 013/196] add env example --- mishards/.env.example | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 mishards/.env.example diff --git a/mishards/.env.example b/mishards/.env.example new file mode 100644 index 0000000000..22406c7f34 --- /dev/null +++ b/mishards/.env.example @@ -0,0 +1,14 @@ +DEBUG=False + +WOSERVER=tcp://127.0.0.1:19530 +TESTING_WOSERVER=tcp://127.0.0.1:19530 +SERVER_PORT=19531 + +SD_NAMESPACE=xp +SD_IN_CLUSTER=False +SD_POLL_INTERVAL=5 +SD_ROSERVER_POD_PATT=.*-ro-servers-.* +SD_LABEL_SELECTOR=tier=ro-servers + +SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQL_ECHO=True From cee3d7e20ce1141eb01091d6c262d6e0a771fbf1 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 20:16:02 +0800 Subject: [PATCH 014/196] remove dummy settings --- mishards/settings.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mishards/settings.py b/mishards/settings.py index 62948e2fa9..2bf7e96a8f 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -8,8 +8,6 @@ env.read_env() DEBUG = env.bool('DEBUG', False) -METADATA_URI = env.str('METADATA_URI', '') - LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') LOG_NAME = env.str('LOG_NAME', 'logfile') From e04e00df4b0d5c1358da941267203880c5f2bd96 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 20:16:18 +0800 Subject: [PATCH 015/196] add docker ignore file --- .dockerignore | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..d1012a3afd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +.git +.gitignore +.env + +mishards/.env From e242a1cc91fe4b3afea1dc88f4a42b1817f5b5b2 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 18 Sep 2019 20:16:46 +0800 Subject: [PATCH 016/196] temp support dns addr --- mishards/connections.py | 1 + mishards/main.py | 8 +++++++- mishards/service_handler.py | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/mishards/connections.py b/mishards/connections.py index 9201ea2b08..c6323f66f8 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -1,5 +1,6 @@ import logging import threading +import socket from functools import wraps from contextlib import contextmanager from milvus import Milvus diff --git a/mishards/main.py b/mishards/main.py index 5d96d8b499..e9c47f9edf 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,6 +1,9 @@ import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from urllib.parse import urlparse +import socket + from mishards import ( settings, db, connect_mgr, @@ -9,7 +12,10 @@ from mishards import ( def main(): discover.start() - connect_mgr.register('WOSERVER', settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER) + woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER + url = urlparse(woserver) + connect_mgr.register('WOSERVER', + '{}://{}:{}'.format(url.scheme, socket.gethostbyname(url.hostname), url.port)) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/service_handler.py b/mishards/service_handler.py index eb2951be5e..ac70440c47 100644 
--- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -145,7 +145,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for res in rs: res.result() - reverse = table_meta.metric_type == types.MetricType.L2 + reverse = table_meta.metric_type == types.MetricType.IP return self._do_merge(all_topk_results, topk, reverse=reverse) def CreateTable(self, request, context): From 512e2b31c46708401c3cba3f3f65c0cc092feef6 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 10:17:00 +0800 Subject: [PATCH 017/196] add pre run handlers --- mishards/main.py | 10 ---------- mishards/server.py | 28 +++++++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/mishards/main.py b/mishards/main.py index e9c47f9edf..7fac55dfa2 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,21 +1,11 @@ import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from urllib.parse import urlparse -import socket - from mishards import ( settings, - db, connect_mgr, - discover, grpc_server as server) def main(): - discover.start() - woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER - url = urlparse(woserver) - connect_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, socket.gethostbyname(url.hostname), url.port)) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 185ed3c957..19cca2c18a 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -1,17 +1,21 @@ import logging import grpc import time +import socket +from urllib.parse import urlparse +from functools import wraps from concurrent import futures from grpc._cython import cygrpc from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler -import mishards.settings +from mishards import settings, discover logger = logging.getLogger(__name__) class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): + self.pre_run_handlers = set() self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr @@ -21,6 +25,27 @@ class Server: (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) + self.register_pre_run_handler(self.pre_run_handler) + + def pre_run_handler(self): + woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER + url = urlparse(woserver) + ip = socket.gethostbyname(url.hostname) + logger.error(ip) + socket.inet_pton(socket.AF_INET, ip) + self.conn_mgr.register('WOSERVER', + '{}://{}:{}'.format(url.scheme, ip, url.port)) + + def register_pre_run_handler(self, func): + logger.info('Registering {} into server pre_run_handlers'.format(func)) + self.pre_run_handlers.add(func) + return func + + def on_pre_run(self): + for handler in self.pre_run_handlers: + handler() + discover.start() + def start(self, port=None): add_MilvusServiceServicer_to_server(ServiceHandler(conn_mgr=self.conn_mgr), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) self.server_impl.start() def run(self, port): logger.info('Milvus server start ......') port = port or self.port + self.on_pre_run() self.start(port) logger.info('Successfully') From d3e79f539ea64e78e6b05910fd607f16c1221e71 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 10:18:46 +0800 Subject: [PATCH 018/196] add pre run handlers --- mishards/server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mishards/server.py
b/mishards/server.py index 19cca2c18a..9966360d47 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -31,7 +31,6 @@ class Server: woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER url = urlparse(woserver) ip = socket.gethostbyname(url.hostname) - logger.error(ip) socket.inet_pton(socket.AF_INET, ip) self.conn_mgr.register('WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port)) From 5249b80b0da577bde03da99f884957a5e6d3aad0 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 10:22:07 +0800 Subject: [PATCH 019/196] remove dummy commented code --- mishards/service_handler.py | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index ac70440c47..f88655d2d6 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -246,32 +246,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for query_range in request.query_range_array: query_range_array.append( Range(query_range.start_value, query_range.end_value)) - # except (TableNotFoundException, exceptions.GRPCInvlidArgument) as exc: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=exc.code, reason=exc.message) - # ) - # except Exception as e: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=str(e)) - # ) results = self._do_query(table_name, table_meta, query_record_array, topk, nprobe, query_range_array) - # try: - # results = workflow.query_vectors(table_name, table_meta, query_record_array, topk, - # nprobe, query_range_array) - # except (exceptions.GRPCQueryInvalidRangeException, TableNotFoundException) as exc: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=exc.code, reason=exc.message) - # ) - # except exceptions.ServiceNotFoundException as exc: - # return milvus_pb2.TopKQueryResultList( - # status=status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, reason=exc.message) - # ) - # except Exception as e: - # logger.error(e) - # results = workflow.query_vectors(table_name, table_meta, query_record_array, - # topk, nprobe, query_range_array) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) From 09d3e7844936dfcab6ad99e93218a581e4eb095c Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 19 Sep 2019 19:41:20 +0800 Subject: [PATCH 020/196] add exception handler --- mishards/connections.py | 21 ++++++++------ mishards/exception_handlers.py | 35 +++++++++++++++++++++++ mishards/exceptions.py | 3 +- mishards/server.py | 10 +++++++ mishards/service_handler.py | 51 ++++++++++++++++++++-------------- 5 files changed, 89 insertions(+), 31 deletions(-) create mode 100644 mishards/exception_handlers.py diff --git a/mishards/connections.py b/mishards/connections.py index c6323f66f8..365dc60125 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -24,14 +24,14 @@ class Connection: def __str__(self): return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) - def _connect(self): + def _connect(self, metadata=None): try: self.conn.connect(uri=self.uri) except Exception as e: if not self.error_handlers: - raise exceptions.ConnectionConnectError(e) + raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) for handler in self.error_handlers: - handler(e) + handler(e, metadata=metadata) @property def can_retry(self): @@ -47,14 +47,15 @@ class Connection: else: logger.warn('{} is retrying 
{}'.format(self, self.retried)) - def on_connect(self): + def on_connect(self, metadata=None): while not self.connected and self.can_retry: self.retried += 1 self.on_retry() - self._connect() + self._connect(metadata=metadata) if not self.can_retry and not self.connected: - raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry)) + raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry), + metadata=metadata) self.retried = 0 @@ -81,14 +82,15 @@ class ConnectionMgr: def conn_names(self): return set(self.metas.keys()) - set(['WOSERVER']) - def conn(self, name, throw=False): + def conn(self, name, metadata, throw=False): c = self.conns.get(name, None) if not c: url = self.metas.get(name, None) if not url: if not throw: return None - raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name)) + raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) threaded = { threading.get_ident() : this_conn @@ -103,7 +105,8 @@ class ConnectionMgr: if not url: if not throw: return None - raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name)) + raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) c[tid] = this_conn return this_conn diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py new file mode 100644 index 0000000000..3de0918be4 --- /dev/null +++ b/mishards/exception_handlers.py @@ -0,0 +1,35 @@ +import logging +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from mishards import server, exceptions + +logger = logging.getLogger(__name__) + +def resp_handler(err, error_code): + if not isinstance(err, exceptions.BaseException): + return status_pb2.Status(error_code=error_code, reason=str(err)) + + status = status_pb2.Status(error_code=error_code, reason=err.message) + + if err.metadata is None: + return status + + resp_class = err.metadata.get('resp_class', None) + if not resp_class: + return status + + if resp_class == milvus_pb2.BoolReply: + return resp_class(status=status, bool_reply=False) + + if resp_class == milvus_pb2.VectorIds: + return resp_class(status=status, vector_id_array=[]) + + if resp_class == milvus_pb2.TopKQueryResultList: + return resp_class(status=status, topk_query_result=[]) + + status.error_code = status_pb2.UNEXPECTED_ERROR + return status + +@server.error_handler(exceptions.TableNotFoundError) +def TableNotFoundErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 0f89ecb52d..1579fefcf4 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -3,8 +3,9 @@ import mishards.exception_codes as codes class BaseException(Exception): code = codes.INVALID_CODE message = 'BaseException' - def __init__(self, message=''): + def __init__(self, message='', metadata=None): self.message = self.__class__.__name__ if not message else message + self.metadata = metadata class ConnectionConnectError(BaseException): code = codes.CONNECT_ERROR_CODE diff --git a/mishards/server.py b/mishards/server.py index 9966360d47..b000016e29 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -2,6 +2,7 @@ import logging import grpc import time import socket +import inspect from
urllib.parse import urlparse from functools import wraps from concurrent import futures @@ -16,6 +17,7 @@ logger = logging.getLogger(__name__) class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): self.pre_run_handlers = set() + self.error_handler = {} self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr @@ -40,6 +42,14 @@ class Server: self.pre_run_handlers.add(func) return func + def errorhandler(self, exception): + if inspect.isclass(exception) and issubclass(exception, Exception): + def wrapper(func): + self.error_handlers[exception] = func + return func + return wrapper + return exception + def on_pre_run(self): for handler in self.pre_run_handlers: handler() diff --git a/mishards/service_handler.py b/mishards/service_handler.py index f88655d2d6..5346be91d8 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -25,18 +25,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): self.conn_mgr = conn_mgr self.table_meta = {} - @property - def connection(self): + def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER') if conn: - conn.on_connect() + conn.on_connect(metadata=metadata) return conn.conn - def query_conn(self, name): - conn = self.conn_mgr.conn(name) + def query_conn(self, name, metadata=None): + conn = self.conn_mgr.conn(name, metadata=metadata) if not conn: - raise exceptions.ConnectionNotFoundError(name) - conn.on_connect() + raise exceptions.ConnectionNotFoundError(name, metadata=metadata) + conn.on_connect(metadata=metadata) return conn.conn def _format_date(self, start, end): @@ -55,14 +54,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return self._format_date(start, end) - def _get_routing_file_ids(self, table_id, range_array): + def _get_routing_file_ids(self, table_id, range_array, metadata=None): table = db.Session.query(Tables).filter(and_( Tables.table_id==table_id, Tables.state!=Tables.TO_DELETE )).first() if not table: - raise exceptions.TableNotFoundError(table_id) + raise exceptions.TableNotFoundError(table_id, metadata=metadata) files = table.files_to_search(range_array) servers = self.conn_mgr.conn_names @@ -84,7 +83,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return routing - def _do_merge(self, files_n_topk_results, topk, reverse=False): + def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): if not files_n_topk_results: return [] @@ -111,9 +110,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): range_array = [self._range_to_date(r) for r in range_array] if range_array else None - routing = self._get_routing_file_ids(table_id, range_array) + routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) + metadata = kwargs.get('metadata', None) + rs = [] all_topk_results = [] @@ -124,7 +125,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): addr, query_params, len(vectors), topk, nprobe )) - conn = self.query_conn(addr) + conn = self.query_conn(addr, metadata=metadata) start = time.time() ret = conn.search_vectors_in_files(table_name=query_params['table_id'], file_ids=query_params['file_ids'], @@ -146,7 +147,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): res.result() reverse = table_meta.metric_type == types.MetricType.IP - return self._do_merge(all_topk_results, topk, reverse=reverse) + return 
self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -156,7 +157,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateTable {}'.format(_table_schema['table_name'])) - _status = self.connection.create_table(_table_schema) + _status = self.connection().create_table(_table_schema) return status_pb2.Status(error_code=_status.code, reason=_status.message) @@ -171,7 +172,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) - _bool = self.connection.has_table(_table_name) + _bool = self.connection(metadata={ + 'resp_class': milvus_pb2.BoolReply + }).has_table(_table_name) return milvus_pb2.BoolReply( status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), @@ -186,7 +189,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('DropTable {}'.format(_table_name)) - _status = self.connection.delete_table(_table_name) + _status = self.connection().delete_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) @@ -201,14 +204,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateIndex {}'.format(_table_name)) # TODO: interface create_table incompleted - _status = self.connection.create_index(_table_name, _index) + _status = self.connection().create_index(_table_name, _index) return status_pb2.Status(error_code=_status.code, reason=_status.message) def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' - _status, _ids = self.connection.add_vectors(None, None, insert_param=request) + _status, _ids = self.connection(metadata={ + 'resp_class': milvus_pb2.VectorIds + }).add_vectors(None, None, insert_param=request) return milvus_pb2.VectorIds( status=status_pb2.Status(error_code=_status.code, reason=_status.message), vector_id_array=_ids @@ -227,10 +232,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) table_meta = self.table_meta.get(table_name, None) + + metadata = { + 'resp_class': milvus_pb2.TopKQueryResultList + } if not table_meta: - status, info = self.connection.describe_table(table_name) + status, info = self.connection(metadata=metadata).describe_table(table_name) if not status.OK(): - raise exceptions.TableNotFoundError(table_name) + raise exceptions.TableNotFoundError(table_name, metadata=metadata) self.table_meta[table_name] = info table_meta = info @@ -248,7 +257,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): Range(query_range.start_value, query_range.end_value)) results = self._do_query(table_name, table_meta, query_record_array, topk, - nprobe, query_range_array) + nprobe, query_range_array, metadata=metadata) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) From eb9174f2d91355c218c4e256a7361d68e776b79e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 09:56:19 +0800 Subject: [PATCH 021/196] optimize exception handlers --- mishards/__init__.py | 2 ++ mishards/exception_codes.py | 1 + mishards/exception_handlers.py | 12 +++++++++-- mishards/exceptions.py | 3 +++ mishards/grpc_utils/__init__.py | 3 +++ mishards/server.py | 26 ++++++++++++++++++++++-- mishards/service_handler.py | 36 ++++++++++++++++++++++++++------- 7 files changed, 72 
insertions(+), 11 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index a792cd5ce9..8105e7edc8 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -17,3 +17,5 @@ discover = ServiceFounder(namespace=settings.SD_NAMESPACE, from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr) + +from mishards import exception_handlers diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index 32b29bdfab..37492f25d4 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -4,3 +4,4 @@ CONNECT_ERROR_CODE = 10001 CONNECTTION_NOT_FOUND_CODE = 10002 TABLE_NOT_FOUND_CODE = 20001 +INVALID_ARGUMENT = 20002 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 3de0918be4..6207f2088c 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -1,6 +1,6 @@ import logging from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 -from mishards import server, exceptions +from mishards import grpc_server as server, exceptions logger = logging.getLogger(__name__) @@ -26,10 +26,18 @@ def resp_handler(err, error_code): if resp_class == milvus_pb2.TopKQueryResultList: return resp_class(status=status, topk_query_result=[]) + if resp_class == milvus_pb2.TableRowCount: + return resp_class(status=status, table_row_count=-1) + status.error_code = status_pb2.UNEXPECTED_ERROR return status -@server.error_handler(exceptions.TableNotFoundError) +@server.errorhandler(exceptions.TableNotFoundError) def TableNotFoundErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) + +@server.errorhandler(exceptions.InvalidArgumentError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 1579fefcf4..4686cf674f 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -15,3 +15,6 @@ class ConnectionNotFoundError(BaseException): class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE + +class InvalidArgumentError(BaseException): + code = codes.INVALID_ARGUMENT diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index e69de29bb2..959d5549c7 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -0,0 +1,3 @@ +def mark_grpc_method(func): + setattr(func, 'grpc_method', True) + return func diff --git a/mishards/server.py b/mishards/server.py index b000016e29..9cca096b6b 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -7,6 +7,7 @@ from urllib.parse import urlparse from functools import wraps from concurrent import futures from grpc._cython import cygrpc +from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler from mishards import settings, discover @@ -17,7 +18,8 @@ logger = logging.getLogger(__name__) class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): self.pre_run_handlers = set() - self.error_handler = {} + self.grpc_methods = set() + self.error_handlers = {} self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr @@ -42,6 +44,18 @@ class Server: self.pre_run_handlers.add(func) return func + def wrap_method_with_errorhandler(self, func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + if 
e.__class__ in self.error_handlers: + return self.error_handlers[e.__class__](e) + raise + + return wrapper + def errorhandler(self, exception): if inspect.isclass(exception) and issubclass(exception, Exception): def wrapper(func): @@ -56,7 +70,8 @@ class Server: discover.start() def start(self, port=None): - add_MilvusServiceServicer_to_server(ServiceHandler(conn_mgr=self.conn_mgr), self.server_impl) + handler_class = self.add_error_handlers(ServiceHandler) + add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) self.server_impl.start() @@ -80,3 +95,10 @@ class Server: self.exit_flag = True self.server_impl.stop(0) logger.info('Server is closed') + + def add_error_handlers(self, target): + for key, attr in target.__dict__.items(): + is_grpc_method = getattr(attr, 'grpc_method', False) + if is_grpc_method: + setattr(target, key, self.wrap_method_with_errorhandler(attr)) + return target diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 5346be91d8..acc04c5eee 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -12,6 +12,7 @@ from milvus.grpc_gen.milvus_pb2 import TopKQueryResult from milvus.client import types from mishards import (db, settings, exceptions) +from mishards.grpc_utils import mark_grpc_method from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser from mishards.models import Tables, TableFiles from mishards.hash_ring import HashRing @@ -24,9 +25,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def __init__(self, conn_mgr, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} + self.error_handlers = {} def connection(self, metadata=None): - conn = self.conn_mgr.conn('WOSERVER') + conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) if conn: conn.on_connect(metadata=metadata) return conn.conn @@ -149,6 +151,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reverse = table_meta.metric_type == types.MetricType.IP return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + @mark_grpc_method def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -161,6 +164,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def HasTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -181,6 +185,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): bool_reply=_bool ) + @mark_grpc_method def DropTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -193,6 +198,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def CreateIndex(self, request, context): _status, unpacks = Parser.parse_proto_IndexParam(request) @@ -208,6 +214,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' @@ -219,6 +226,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): vector_id_array=_ids ) + @mark_grpc_method def Search(self, request, context): table_name = 
request.table_name @@ -228,14 +236,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) - if nprobe > self.MAX_NPROBE or nprobe <= 0: - raise exceptions.GRPCInvlidArgument('Invalid nprobe: {}'.format(nprobe)) - - table_meta = self.table_meta.get(table_name, None) - metadata = { 'resp_class': milvus_pb2.TopKQueryResultList } + + if nprobe > self.MAX_NPROBE or nprobe <= 0: + raise exceptions.InvalidArgumentError(message='Invalid nprobe: {}'.format(nprobe), + metadata=metadata) + + table_meta = self.table_meta.get(table_name, None) + if not table_meta: status, info = self.connection(metadata=metadata).describe_table(table_name) if not status.OK(): @@ -268,9 +278,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) return topk_result_list + @mark_grpc_method def SearchInFiles(self, request, context): raise NotImplemented() + @mark_grpc_method def DescribeTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -304,6 +316,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) ) + @mark_grpc_method def CountTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -316,12 +329,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CountTable {}'.format(_table_name)) - _status, _count = self.connection.get_table_row_count(_table_name) + metadata = { + 'resp_class': milvus_pb2.TableRowCount + } + _status, _count = self.connection(metadata=metadata).get_table_row_count(_table_name) return milvus_pb2.TableRowCount( status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) + @mark_grpc_method def Cmd(self, request, context): _status, _cmd = Parser.parse_proto_Command(request) logger.info('Cmd: {}'.format(_cmd)) @@ -341,6 +358,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply ) + @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') _status, _results = self.connection.show_tables() @@ -354,6 +372,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_name=_result ) + @mark_grpc_method def DeleteByRange(self, request, context): _status, unpacks = \ Parser.parse_proto_DeleteByRangeParam(request) @@ -367,6 +386,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status = self.connection.delete_vectors_by_range(_table_name, _start_date, _end_date) return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def PreloadTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -377,6 +397,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status = self.connection.preload_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) + @mark_grpc_method def DescribeIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -397,6 +418,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return milvus_pb2.IndexParam(table_name=_tablename, index=_index) + @mark_grpc_method def DropIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) From 1144f6798dcef8ec6422a373f169ba72ddd11f34 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 10:20:25 +0800 Subject: [PATCH 022/196] fix bug in service handler --- 
mishards/service_handler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index acc04c5eee..128667d9b6 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -112,6 +112,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): range_array = [self._range_to_date(r) for r in range_array] if range_array else None + metadata = kwargs.get('metadata', None) routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) From 33fe3b1bdee22e56a4288a1f65cff50263323954 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 10:44:26 +0800 Subject: [PATCH 023/196] add more exception handlers --- mishards/exception_codes.py | 4 +++- mishards/exception_handlers.py | 13 +++++++++++++ mishards/exceptions.py | 8 +++++++- mishards/service_handler.py | 25 +++++++++++++++++-------- 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index 37492f25d4..ecb2469562 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -2,6 +2,8 @@ INVALID_CODE = -1 CONNECT_ERROR_CODE = 10001 CONNECTTION_NOT_FOUND_CODE = 10002 +DB_ERROR_CODE = 10003 TABLE_NOT_FOUND_CODE = 20001 -INVALID_ARGUMENT = 20002 +INVALID_ARGUMENT_CODE = 20002 +INVALID_DATE_RANGE_CODE = 20003 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 6207f2088c..2518b64b3e 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -29,6 +29,9 @@ def resp_handler(err, error_code): if resp_class == milvus_pb2.TableRowCount: return resp_class(status=status, table_row_count=-1) + if resp_class == milvus_pb2.TableName: + return resp_class(status=status, table_name=[]) + status.error_code = status_pb2.UNEXPECTED_ERROR return status @@ -41,3 +44,13 @@ def TableNotFoundErrorHandler(err): def InvalidArgumentErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) + +@server.errorhandler(exceptions.DBError) +def DBErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.UNEXPECTED_ERROR) + +@server.errorhandler(exceptions.InvalidRangeError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_RANGE) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 4686cf674f..2aa2b39eb9 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -13,8 +13,14 @@ class ConnectionConnectError(BaseException): class ConnectionNotFoundError(BaseException): code = codes.CONNECTTION_NOT_FOUND_CODE +class DBError(BaseException): + code = codes.DB_ERROR_CODE + class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE class InvalidArgumentError(BaseException): - code = codes.INVALID_ARGUMENT + code = codes.INVALID_ARGUMENT_CODE + +class InvalidRangeError(BaseException): + code = codes.INVALID_DATE_RANGE_CODE diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 128667d9b6..536a17c4e3 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -5,10 +5,12 @@ from contextlib import contextmanager from collections import defaultdict from sqlalchemy import and_ +from sqlalchemy import exc as sqlalchemy_exc from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 
from milvus.grpc_gen.milvus_pb2 import TopKQueryResult +from milvus.client.Abstract import Range from milvus.client import types from mishards import (db, settings, exceptions) @@ -44,7 +46,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return ((start.year-1900)*10000 + (start.month-1)*100 + start.day , (end.year-1900)*10000 + (end.month-1)*100 + end.day) - def _range_to_date(self, range_obj): + def _range_to_date(self, range_obj, metadata=None): try: start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') @@ -52,15 +54,19 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): except (ValueError, AssertionError): raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( range_obj.start_date, range_obj.end_date - )) + ), metadata=metadata) return self._format_date(start, end) def _get_routing_file_ids(self, table_id, range_array, metadata=None): - table = db.Session.query(Tables).filter(and_( - Tables.table_id==table_id, - Tables.state!=Tables.TO_DELETE - )).first() + # PXU TODO: Implement Thread-local Context + try: + table = db.Session.query(Tables).filter(and_( + Tables.table_id==table_id, + Tables.state!=Tables.TO_DELETE + )).first() + except sqlalchemy_exc.SQLAlchemyError as e: + raise exceptions.DBError(message=str(e), metadata=metadata) if not table: raise exceptions.TableNotFoundError(table_id, metadata=metadata) @@ -111,8 +117,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return topk_query_result def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): - range_array = [self._range_to_date(r) for r in range_array] if range_array else None metadata = kwargs.get('metadata', None) + range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) @@ -362,7 +368,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') - _status, _results = self.connection.show_tables() + metadata = { + 'resp_class': milvus_pb2.TableName + } + _status, _results = self.connection(metadata=metadata).show_tables() if not _status.OK(): _results = [] From 1e2cc2eb6622a46aaa0ff17d230350605b430687 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 11:00:35 +0800 Subject: [PATCH 024/196] refactor sd --- mishards/__init__.py | 2 +- sd/__init__.py | 0 {mishards => sd}/service_founder.py | 0 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 sd/__init__.py rename {mishards => sd}/service_founder.py (100%) diff --git a/mishards/__init__.py b/mishards/__init__.py index 8105e7edc8..3158afa5b3 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -7,7 +7,7 @@ db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() -from mishards.service_founder import ServiceFounder +from sd.service_founder import ServiceFounder discover = ServiceFounder(namespace=settings.SD_NAMESPACE, conn_mgr=connect_mgr, pod_patt=settings.SD_ROSERVER_POD_PATT, diff --git a/sd/__init__.py b/sd/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/mishards/service_founder.py b/sd/service_founder.py similarity index 100% rename from mishards/service_founder.py rename to 
sd/service_founder.py From 8569309644e752b128af402fa95d5575e3096604 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 11:08:14 +0800 Subject: [PATCH 025/196] refactor utils --- mishards/connections.py | 2 +- mishards/settings.py | 2 +- sd/service_founder.py | 2 +- {mishards/utils => utils}/__init__.py | 0 {mishards/utils => utils}/logger_helper.py | 0 5 files changed, 3 insertions(+), 3 deletions(-) rename {mishards/utils => utils}/__init__.py (100%) rename {mishards/utils => utils}/logger_helper.py (100%) diff --git a/mishards/connections.py b/mishards/connections.py index 365dc60125..7307c2a489 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -6,7 +6,7 @@ from contextlib import contextmanager from milvus import Milvus from mishards import (settings, exceptions) -from mishards.utils import singleton +from utils import singleton logger = logging.getLogger(__name__) diff --git a/mishards/settings.py b/mishards/settings.py index 2bf7e96a8f..f99bd3b3c6 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -13,7 +13,7 @@ LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') LOG_NAME = env.str('LOG_NAME', 'logfile') TIMEZONE = env.str('TIMEZONE', 'UTC') -from mishards.utils.logger_helper import config +from utils.logger_helper import config config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') diff --git a/sd/service_founder.py b/sd/service_founder.py index f1a37a440b..79292d452f 100644 --- a/sd/service_founder.py +++ b/sd/service_founder.py @@ -11,7 +11,7 @@ import queue from functools import wraps from kubernetes import client, config, watch -from mishards.utils import singleton +from utils import singleton logger = logging.getLogger(__name__) diff --git a/mishards/utils/__init__.py b/utils/__init__.py similarity index 100% rename from mishards/utils/__init__.py rename to utils/__init__.py diff --git a/mishards/utils/logger_helper.py b/utils/logger_helper.py similarity index 100% rename from mishards/utils/logger_helper.py rename to utils/logger_helper.py From b4ed4b2e35c3119290b29f1539c2cf37aca7cebd Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 12:17:13 +0800 Subject: [PATCH 026/196] refactor kubernetes service provider --- mishards/__init__.py | 11 ++-- mishards/settings.py | 16 +++-- sd/__init__.py | 27 ++++++++ ...vice_founder.py => kubernetes_provider.py} | 62 ++++++++++++++----- 4 files changed, 90 insertions(+), 26 deletions(-) rename sd/{service_founder.py => kubernetes_provider.py} (83%) diff --git a/mishards/__init__.py b/mishards/__init__.py index 3158afa5b3..55b24c082c 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -7,13 +7,10 @@ db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() -from sd.service_founder import ServiceFounder -discover = ServiceFounder(namespace=settings.SD_NAMESPACE, - conn_mgr=connect_mgr, - pod_patt=settings.SD_ROSERVER_POD_PATT, - label_selector=settings.SD_LABEL_SELECTOR, - in_cluster=settings.SD_IN_CLUSTER, - poll_interval=settings.SD_POLL_INTERVAL) +from sd import ProviderManager + +sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) +discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr) diff --git a/mishards/settings.py b/mishards/settings.py index f99bd3b3c6..046508f92c 100644 --- 
a/mishards/settings.py +++ b/mishards/settings.py @@ -26,11 +26,17 @@ SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) SERVER_PORT = env.int('SERVER_PORT', 19530) WOSERVER = env.str('WOSERVER') -SD_NAMESPACE = env.str('SD_NAMESPACE', '') -SD_IN_CLUSTER = env.bool('SD_IN_CLUSTER', False) -SD_POLL_INTERVAL = env.int('SD_POLL_INTERVAL', 5) -SD_ROSERVER_POD_PATT = env.str('SD_ROSERVER_POD_PATT', '') -SD_LABEL_SELECTOR = env.str('SD_LABEL_SELECTOR', '') +SD_PROVIDER_SETTINGS = None +SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes') +if SD_PROVIDER == 'Kubernetes': + from sd.kubernetes_provider import KubernetesProviderSettings + SD_PROVIDER_SETTINGS = KubernetesProviderSettings( + namespace=env.str('SD_NAMESPACE', ''), + in_cluster=env.bool('SD_IN_CLUSTER', False), + poll_interval=env.int('SD_POLL_INTERVAL', 5), + pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), + label_selector=env.str('SD_LABEL_SELECTOR', '') + ) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') diff --git a/sd/__init__.py b/sd/__init__.py index e69de29bb2..5c37bc621b 100644 --- a/sd/__init__.py +++ b/sd/__init__.py @@ -0,0 +1,27 @@ +import logging +import inspect +# from utils import singleton + +logger = logging.getLogger(__name__) + + +class ProviderManager: + PROVIDERS = {} + + @classmethod + def register_service_provider(cls, target): + if inspect.isfunction(target): + cls.PROVIDERS[target.__name__] = target + elif inspect.isclass(target): + name = target.__dict__.get('NAME', None) + name = name if name else target.__class__.__name__ + cls.PROVIDERS[name] = target + else: + assert False, 'Cannot register_service_provider for: {}'.format(target) + return target + + @classmethod + def get_provider(cls, name): + return cls.PROVIDERS.get(name, None) + +from sd import kubernetes_provider diff --git a/sd/service_founder.py b/sd/kubernetes_provider.py similarity index 83% rename from sd/service_founder.py rename to sd/kubernetes_provider.py index 79292d452f..51665a0cb5 100644 --- a/sd/service_founder.py +++ b/sd/kubernetes_provider.py @@ -12,6 +12,7 @@ from functools import wraps from kubernetes import client, config, watch from utils import singleton +from sd import ProviderManager logger = logging.getLogger(__name__) @@ -32,7 +33,7 @@ class K8SMixin: self.v1 = client.CoreV1Api() -class K8SServiceDiscover(threading.Thread, K8SMixin): +class K8SHeartbeatHandler(threading.Thread, K8SMixin): def __init__(self, message_queue, namespace, label_selector, in_cluster=False, **kwargs): K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs) threading.Thread.__init__(self) @@ -202,13 +203,26 @@ class EventHandler(threading.Thread): except queue.Empty: continue -@singleton -class ServiceFounder(object): - def __init__(self, conn_mgr, namespace, pod_patt, label_selector, in_cluster=False, **kwargs): +class KubernetesProviderSettings: + def __init__(self, namespace, pod_patt, label_selector, in_cluster, poll_interval, **kwargs): self.namespace = namespace + self.pod_patt = pod_patt + self.label_selector = label_selector + self.in_cluster = in_cluster + self.poll_interval = poll_interval + +@singleton +@ProviderManager.register_service_provider +class KubernetesProvider(object): + NAME = 'Kubernetes' + def __init__(self, settings, conn_mgr, **kwargs): + self.namespace = settings.namespace + self.pod_patt = settings.pod_patt + self.label_selector = settings.label_selector + self.in_cluster = settings.in_cluster + self.poll_interval = settings.poll_interval 
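`ProviderManager`, defined above, is a plain name-keyed registry: `register_service_provider` accepts either a function or a class, and `get_provider` looks an implementation up by the name that `settings.SD_PROVIDER` supplies. One caveat in the class branch: the fallback `target.__class__.__name__` evaluates to `'type'` for any class (the class's own name would be `target.__name__`), so providers should always define `NAME` explicitly, as `KubernetesProvider` does. A usage sketch, assuming the `sd` package imports cleanly (its `__init__` pulls in the kubernetes provider, so the `kubernetes` client must be installed); `DummyProvider` is illustrative only:

```python
from sd import ProviderManager

@ProviderManager.register_service_provider
class DummyProvider:
    # Real providers also take (settings, conn_mgr) in __init__,
    # mirroring KubernetesProvider above.
    NAME = 'Dummy'

assert ProviderManager.get_provider('Dummy') is DummyProvider
assert ProviderManager.get_provider('missing') is None  # dict.get default
```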
self.kwargs = kwargs self.queue = queue.Queue() - self.in_cluster = in_cluster self.conn_mgr = conn_mgr @@ -226,19 +240,20 @@ class ServiceFounder(object): **kwargs ) - self.pod_heartbeater = K8SServiceDiscover( + self.pod_heartbeater = K8SHeartbeatHandler( message_queue=self.queue, - namespace=namespace, - label_selector=label_selector, + namespace=self.namespace, + label_selector=self.label_selector, in_cluster=self.in_cluster, v1=self.v1, + poll_interval=self.poll_interval, **kwargs ) self.event_handler = EventHandler(mgr=self, message_queue=self.queue, namespace=self.namespace, - pod_patt=pod_patt, **kwargs) + pod_patt=self.pod_patt, **kwargs) def add_pod(self, name, ip): self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) @@ -250,8 +265,6 @@ class ServiceFounder(object): self.listener.daemon = True self.listener.start() self.event_handler.start() - # while self.listener.at_start_up: - # time.sleep(1) self.pod_heartbeater.start() @@ -262,11 +275,32 @@ class ServiceFounder(object): if __name__ == '__main__': - from mishards import connect_mgr logging.basicConfig(level=logging.INFO) - t = ServiceFounder(namespace='xp', conn_mgr=connect_mgr, pod_patt=".*-ro-servers-.*", label_selector='tier=ro-servers', in_cluster=False) + class Connect: + def register(self, name, value): + logger.error('Register: {} - {}'.format(name, value)) + def unregister(self, name): + logger.error('Unregister: {}'.format(name)) + + @property + def conn_names(self): + return set() + + connect_mgr = Connect() + + settings = KubernetesProviderSettings( + namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) + + provider_class = ProviderManager.get_provider('Kubernetes') + t = provider_class(conn_mgr=connect_mgr, + settings=settings + ) t.start() - cnt = 2 + cnt = 100 while cnt > 0: time.sleep(2) cnt -= 1 From 6acddae13095080d8a60abfcafa6e6cca354a6bf Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 12:53:13 +0800 Subject: [PATCH 027/196] add static provider --- mishards/settings.py | 5 +++++ sd/__init__.py | 2 +- sd/static_provider.py | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 sd/static_provider.py diff --git a/mishards/settings.py b/mishards/settings.py index 046508f92c..46221c5f98 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -37,6 +37,11 @@ if SD_PROVIDER == 'Kubernetes': pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), label_selector=env.str('SD_LABEL_SELECTOR', '') ) +elif SD_PROVIDER == 'Static': + from sd.static_provider import StaticProviderSettings + SD_PROVIDER_SETTINGS = StaticProviderSettings( + hosts=env.list('SD_STATIC_HOSTS', []) + ) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') diff --git a/sd/__init__.py b/sd/__init__.py index 5c37bc621b..6dfba5ddc1 100644 --- a/sd/__init__.py +++ b/sd/__init__.py @@ -24,4 +24,4 @@ class ProviderManager: def get_provider(cls, name): return cls.PROVIDERS.get(name, None) -from sd import kubernetes_provider +from sd import kubernetes_provider, static_provider diff --git a/sd/static_provider.py b/sd/static_provider.py new file mode 100644 index 0000000000..73ae483b34 --- /dev/null +++ b/sd/static_provider.py @@ -0,0 +1,32 @@ +import os, sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from utils import singleton +from sd import ProviderManager + +class StaticProviderSettings: + def 
__init__(self, hosts): + self.hosts = hosts + +@singleton +@ProviderManager.register_service_provider +class KubernetesProvider(object): + NAME = 'Static' + def __init__(self, settings, conn_mgr, **kwargs): + self.conn_mgr = conn_mgr + self.hosts = settings.hosts + + def start(self): + for host in self.hosts: + self.add_pod(host, host) + + def stop(self): + for host in self.hosts: + self.delete_pod(host) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) From ce95b50143ed4a57cacd414eeece12cb6d1fe638 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 13:32:29 +0800 Subject: [PATCH 028/196] support sqlite --- mishards/db_base.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mishards/db_base.py b/mishards/db_base.py index ffbe29f94f..3b2c699864 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -1,5 +1,6 @@ import logging from sqlalchemy import create_engine +from sqlalchemy.engine.url import make_url from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session @@ -12,7 +13,11 @@ class DB: uri and self.init_db(uri, echo) def init_db(self, uri, echo=False): - self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + url = make_url(uri) + if url.get_backend_name() == 'sqlite': + self.engine = create_engine(url) + else: + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, pool_pre_ping=True, echo=echo, max_overflow=0) From 76eb24484765a3771797701f3498af7ab37b744e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 14:08:57 +0800 Subject: [PATCH 029/196] fix exception handler used in service handler --- mishards/exception_handlers.py | 18 ++++++++++++++++++ mishards/service_handler.py | 28 ++++++++++++++++++++-------- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 2518b64b3e..a2659f91af 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -32,6 +32,24 @@ def resp_handler(err, error_code): if resp_class == milvus_pb2.TableName: return resp_class(status=status, table_name=[]) + if resp_class == milvus_pb2.StringReply: + return resp_class(status=status, string_reply='') + + if resp_class == milvus_pb2.TableSchema: + table_name = milvus_pb2.TableName( + status=status + ) + return milvus_pb2.TableSchema( + table_name=table_name + ) + + if resp_class == milvus_pb2.IndexParam: + return milvus_pb2.IndexParam( + table_name=milvus_pb2.TableName( + status=status + ) + ) + status.error_code = status_pb2.UNEXPECTED_ERROR return status diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 536a17c4e3..f39ad3ef46 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -50,7 +50,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): try: start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') - assert start >= end + assert start < end except (ValueError, AssertionError): raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( range_obj.start_date, range_obj.end_date @@ -301,8 +301,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_name=table_name ) + metadata = { + 'resp_class': milvus_pb2.TableSchema + } + logger.info('DescribeTable {}'.format(_table_name)) - 
_status, _table = self.connection.describe_table(_table_name) + _status, _table = self.connection(metadata=metadata).describe_table(_table_name) if _status.OK(): _grpc_table_name = milvus_pb2.TableName( @@ -355,10 +359,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): status_pb2.Status(error_code=_status.code, reason=_status.message) ) + metadata = { + 'resp_class': milvus_pb2.StringReply + } + if _cmd == 'version': - _status, _reply = self.connection.server_version() + _status, _reply = self.connection(metadata=metadata).server_version() else: - _status, _reply = self.connection.server_status() + _status, _reply = self.connection(metadata=metadata).server_status() return milvus_pb2.StringReply( status=status_pb2.Status(error_code=_status.code, reason=_status.message), @@ -393,7 +401,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _table_name, _start_date, _end_date = unpacks logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) - _status = self.connection.delete_vectors_by_range(_table_name, _start_date, _end_date) + _status = self.connection().delete_vectors_by_range(_table_name, _start_date, _end_date) return status_pb2.Status(error_code=_status.code, reason=_status.message) @mark_grpc_method @@ -404,7 +412,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('PreloadTable {}'.format(_table_name)) - _status = self.connection.preload_table(_table_name) + _status = self.connection().preload_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) @mark_grpc_method @@ -418,8 +426,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) ) + metadata = { + 'resp_class': milvus_pb2.IndexParam + } + logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self.connection.describe_index(_table_name) + _status, _index_param = self.connection(metadata=metadata).describe_index(_table_name) _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) _tablename = milvus_pb2.TableName( @@ -436,5 +448,5 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('DropIndex {}'.format(_table_name)) - _status = self.connection.drop_index(_table_name) + _status = self.connection().drop_index(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) From bc056a282929dab4b0e45f2101b3dbef8a28e0a7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 21 Sep 2019 14:13:53 +0800 Subject: [PATCH 030/196] add more print info at startup --- mishards/connections.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mishards/connections.py b/mishards/connections.py index 7307c2a489..35c5d6c3bd 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -139,6 +139,7 @@ class ConnectionMgr: logger.warn('Non-existed meta: {}'.format(name)) def register(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) meta = self.metas.get(name) if not meta: return self.on_new_meta(name, url) @@ -146,6 +147,7 @@ class ConnectionMgr: return self.on_duplicate_meta(name, url) def unregister(self, name): + logger.info('Unregister Connection: name={}'.format(name)) url = self.metas.pop(name, None) if url is None: return self.on_nonexisted_meta(name) From a0a5965fc6c826accf02a64c743d45e636f5b687 Mon Sep 17 00:00:00 
2001 From: "peng.xu" Date: Wed, 25 Sep 2019 16:23:02 +0800 Subject: [PATCH 031/196] add tracing --- mishards/server.py | 34 ++++++++++++++++++++++++++++++++++ mishards/settings.py | 9 +++++++++ requirements.txt | 2 ++ 3 files changed, 45 insertions(+) diff --git a/mishards/server.py b/mishards/server.py index 9cca096b6b..4e44731f0e 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -8,12 +8,17 @@ from functools import wraps from concurrent import futures from grpc._cython import cygrpc from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable +from jaeger_client import Config +from grpc_opentracing import open_tracing_server_interceptor +from grpc_opentracing.grpcext import intercept_server from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler from mishards import settings, discover logger = logging.getLogger(__name__) +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server class Server: def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): @@ -23,12 +28,40 @@ class Server: self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr + tracer_interceptor = None + self.tracer = None + interceptor_decorator = empty_server_interceptor_decorator + + if settings.TRACING_ENABLED: + tracer_config = Config(config={ + 'sampler': { + 'type': 'const', + 'param': 1, + }, + 'local_agent': { + 'reporting_host': settings.TracingConfig.TRACING_REPORTING_HOST, + 'reporting_port': settings.TracingConfig.TRACING_REPORTING_PORT + }, + 'logging': settings.TracingConfig.TRACING_LOGGING, + }, + service_name=settings.TracingConfig.TRACING_SERVICE_NAME, + validate=settings.TracingConfig.TRACING_VALIDATE + ) + + self.tracer = tracer_config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor(self.tracer, + log_payloads=settings.TracingConfig.TRACING_LOG_PAYLOAD) + + interceptor_decorator = intercept_server + self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) + self.server_impl = interceptor_decorator(self.server_impl, tracer_interceptor) + self.register_pre_run_handler(self.pre_run_handler) def pre_run_handler(self): @@ -94,6 +127,7 @@ class Server: logger.info('Server is shuting down ......') self.exit_flag = True self.server_impl.stop(0) + self.tracer and self.tracer.close() logger.info('Server is closed') def add_error_handlers(self, target): diff --git a/mishards/settings.py b/mishards/settings.py index 46221c5f98..94b8998881 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -46,6 +46,15 @@ elif SD_PROVIDER == 'Static': TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') +TRACING_ENABLED = env.bool('TRACING_ENABLED', False) +class TracingConfig: + TRACING_LOGGING = env.bool('TRACING_LOGGING', True), + TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') + TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) + TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', DEBUG) + TRACING_REPORTING_HOST = env.str('TRACING_REPORTING_HOST', '127.0.0.1') + TRACING_REPORTING_PORT = env.str('TRACING_REPORTING_PORT', '5775') + if __name__ == '__main__': import logging diff --git a/requirements.txt b/requirements.txt index 8cedabdf7b..03db7aeed3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,3 +31,5 
@@ rsa==4.0 six==1.12.0 SQLAlchemy==1.3.5 urllib3==1.25.3 +jaeger-client>=3.4.0 +grpcio-opentracing>=1.0 From d4fb05688aa819f0761ed1017717a74e52a78873 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 25 Sep 2019 17:14:18 +0800 Subject: [PATCH 032/196] refactor tracing --- mishards/__init__.py | 5 ++++- mishards/server.py | 35 ++++------------------------------- mishards/settings.py | 2 +- tracing/__init__.py | 17 +++++++++++++++++ tracing/factory.py | 39 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 65 insertions(+), 33 deletions(-) create mode 100644 tracing/__init__.py create mode 100644 tracing/factory.py diff --git a/mishards/__init__.py b/mishards/__init__.py index 55b24c082c..640293c265 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -12,7 +12,10 @@ from sd import ProviderManager sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) +from tracing.factory import TracerFactory +tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig) + from mishards.server import Server -grpc_server = Server(conn_mgr=connect_mgr) +grpc_server = Server(conn_mgr=connect_mgr, tracer=tracer) from mishards import exception_handlers diff --git a/mishards/server.py b/mishards/server.py index 4e44731f0e..93d7e38826 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -9,19 +9,15 @@ from concurrent import futures from grpc._cython import cygrpc from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable from jaeger_client import Config -from grpc_opentracing import open_tracing_server_interceptor -from grpc_opentracing.grpcext import intercept_server from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.service_handler import ServiceHandler from mishards import settings, discover logger = logging.getLogger(__name__) -def empty_server_interceptor_decorator(target_server, interceptor): - return target_server class Server: - def __init__(self, conn_mgr, port=19530, max_workers=10, **kwargs): + def __init__(self, conn_mgr, tracer, port=19530, max_workers=10, **kwargs): self.pre_run_handlers = set() self.grpc_methods = set() self.error_handlers = {} @@ -29,30 +25,7 @@ class Server: self.port = int(port) self.conn_mgr = conn_mgr tracer_interceptor = None - self.tracer = None - interceptor_decorator = empty_server_interceptor_decorator - - if settings.TRACING_ENABLED: - tracer_config = Config(config={ - 'sampler': { - 'type': 'const', - 'param': 1, - }, - 'local_agent': { - 'reporting_host': settings.TracingConfig.TRACING_REPORTING_HOST, - 'reporting_port': settings.TracingConfig.TRACING_REPORTING_PORT - }, - 'logging': settings.TracingConfig.TRACING_LOGGING, - }, - service_name=settings.TracingConfig.TRACING_SERVICE_NAME, - validate=settings.TracingConfig.TRACING_VALIDATE - ) - - self.tracer = tracer_config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor(self.tracer, - log_payloads=settings.TracingConfig.TRACING_LOG_PAYLOAD) - - interceptor_decorator = intercept_server + self.tracer = tracer self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), @@ -60,7 +33,7 @@ class Server: (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) - self.server_impl = interceptor_decorator(self.server_impl, tracer_interceptor) + self.server_impl = self.tracer.decorate(self.server_impl) self.register_pre_run_handler(self.pre_run_handler) @@ -127,7 +100,7 @@ 
class Server: logger.info('Server is shuting down ......') self.exit_flag = True self.server_impl.stop(0) - self.tracer and self.tracer.close() + self.tracer.close() logger.info('Server is closed') def add_error_handlers(self, target): diff --git a/mishards/settings.py b/mishards/settings.py index 94b8998881..9a8e770f11 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -46,7 +46,7 @@ elif SD_PROVIDER == 'Static': TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') -TRACING_ENABLED = env.bool('TRACING_ENABLED', False) +TRACING_TYPE = env.str('TRACING_TYPE', '') class TracingConfig: TRACING_LOGGING = env.bool('TRACING_LOGGING', True), TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') diff --git a/tracing/__init__.py b/tracing/__init__.py new file mode 100644 index 0000000000..3edddea9df --- /dev/null +++ b/tracing/__init__.py @@ -0,0 +1,17 @@ + +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server + +class Tracer: + def __init__(self, tracer=None, + interceptor=None, + server_decorator=empty_server_interceptor_decorator): + self.tracer = tracer + self.interceptor = interceptor + self.server_decorator=server_decorator + + def decorate(self, server): + return self.server_decorator(server, self.interceptor) + + def close(self): + self.tracer and self.tracer.close() diff --git a/tracing/factory.py b/tracing/factory.py new file mode 100644 index 0000000000..f00a537e78 --- /dev/null +++ b/tracing/factory.py @@ -0,0 +1,39 @@ +import logging +from jaeger_client import Config +from grpc_opentracing.grpcext import intercept_server +from grpc_opentracing import open_tracing_server_interceptor + +from tracing import Tracer, empty_server_interceptor_decorator + +logger = logging.getLogger(__name__) + + +class TracerFactory: + @classmethod + def new_tracer(cls, tracer_type, tracer_config, **kwargs): + if not tracer_type: + return Tracer() + + if tracer_type.lower() == 'jaeger': + config = Config(config={ + 'sampler': { + 'type': 'const', + 'param': 1, + }, + 'local_agent': { + 'reporting_host': tracer_config.TRACING_REPORTING_HOST, + 'reporting_port': tracer_config.TRACING_REPORTING_PORT + }, + 'logging': tracer_config.TRACING_LOGGING, + }, + service_name=tracer_config.TRACING_SERVICE_NAME, + validate=tracer_config.TRACING_VALIDATE + ) + + tracer = config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor(tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD) + + return Tracer(tracer, tracer_interceptor, intercept_server) + + assert False, 'Unsupported tracer type: {}'.format(tracer_type) From 63d3372b4c8931bc0258f378dee00509dc1080ef Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 25 Sep 2019 18:36:19 +0800 Subject: [PATCH 033/196] convert hostname to ip to avoid pymilvus dns domain name parse bug --- mishards/server.py | 2 +- sd/static_provider.py | 3 ++- start_services.yml | 28 ++++++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 start_services.yml diff --git a/mishards/server.py b/mishards/server.py index 93d7e38826..679d5f996e 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -43,7 +43,7 @@ class Server: ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) self.conn_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, ip, url.port)) + '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) def register_pre_run_handler(self, func): logger.info('Regiterring {} into 
server pre_run_handlers'.format(func)) diff --git a/sd/static_provider.py b/sd/static_provider.py index 73ae483b34..423d6c4d60 100644 --- a/sd/static_provider.py +++ b/sd/static_provider.py @@ -2,6 +2,7 @@ import os, sys if __name__ == '__main__': sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +import socket from utils import singleton from sd import ProviderManager @@ -15,7 +16,7 @@ class KubernetesProvider(object): NAME = 'Static' def __init__(self, settings, conn_mgr, **kwargs): self.conn_mgr = conn_mgr - self.hosts = settings.hosts + self.hosts = [socket.gethostbyname(host) for host in settings.hosts] def start(self): for host in self.hosts: diff --git a/start_services.yml b/start_services.yml new file mode 100644 index 0000000000..e2cd0653c3 --- /dev/null +++ b/start_services.yml @@ -0,0 +1,28 @@ +version: "2.3" +services: + milvus: + runtime: nvidia + restart: always + image: registry.zilliz.com/milvus/engine:branch-0.4.0-release-c58ca6 + # ports: + # - "0.0.0.0:19530:19530" + volumes: + - /tmp/milvus/db:/opt/milvus/db + + mishards: + restart: always + image: registry.zilliz.com/milvus/mishards:v0.0.2 + ports: + - "0.0.0.0:19530:19531" + - "0.0.0.0:19532:19532" + volumes: + - /tmp/milvus/db:/tmp/milvus/db + - /tmp/mishards_env:/source/mishards/.env + command: ["python", "mishards/main.py"] + environment: + DEBUG: 'true' + SERVER_PORT: 19531 + WOSERVER: tcp://milvus:19530 + SD_STATIC_HOSTS: milvus + depends_on: + - milvus From d96e601ab83f8b62992e0d16e66741cf2c0d59a5 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 25 Sep 2019 19:37:25 +0800 Subject: [PATCH 034/196] add jaeger in start_services.yml --- start_services.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/start_services.yml b/start_services.yml index e2cd0653c3..5c779c5b82 100644 --- a/start_services.yml +++ b/start_services.yml @@ -9,6 +9,16 @@ services: volumes: - /tmp/milvus/db:/opt/milvus/db + jaeger: + restart: always + image: jaegertracing/all-in-one:1.14 + ports: + - "0.0.0.0:5775:5775/udp" + - "0.0.0.0:16686:16686" + - "0.0.0.0:9441:9441" + environment: + COLLECTOR_ZIPKIN_HTTP_PORT: 9411 + mishards: restart: always image: registry.zilliz.com/milvus/mishards:v0.0.2 @@ -24,5 +34,11 @@ services: SERVER_PORT: 19531 WOSERVER: tcp://milvus:19530 SD_STATIC_HOSTS: milvus + TRACING_TYPE: jaeger + TRACING_SERVICE_NAME: mishards-demo + TRACING_REPORTING_HOST: jaeger + TRACING_REPORTING_PORT: 5775 + depends_on: - milvus + - jaeger From dc2a60f0808701521c3876edf26b5ac26eab90b8 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 10:33:38 +0800 Subject: [PATCH 035/196] fix bug in jaeger tracing settings --- mishards/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/settings.py b/mishards/settings.py index 9a8e770f11..eb6e1e5964 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -48,7 +48,7 @@ TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') TRACING_TYPE = env.str('TRACING_TYPE', '') class TracingConfig: - TRACING_LOGGING = env.bool('TRACING_LOGGING', True), + TRACING_LOGGING = env.bool('TRACING_LOGGING', True) TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', DEBUG) From 4c9cd6dc8ed1ba440bd9839e097c507668b1743f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 11:02:05 +0800 Subject: [PATCH 036/196] add span decorator --- mishards/server.py | 1 - 
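The one-character `settings.py` fix above is easy to miss: the trailing comma made `TRACING_LOGGING` a one-element tuple rather than a boolean, and since any non-empty tuple is truthy, the misconfiguration would not fail loudly. A standalone illustration of the pitfall:

```python
# A trailing comma turns an assignment into a one-element tuple.
TRACING_LOGGING = True,                # actually (True,)
assert TRACING_LOGGING == (True,)
assert bool(TRACING_LOGGING)           # non-empty tuples are always truthy,
                                       # even (False,)

TRACING_LOGGING = True                 # the intended boolean
assert TRACING_LOGGING is True
```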
tracing/__init__.py | 12 ++++++++++++ tracing/factory.py | 7 +++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/mishards/server.py b/mishards/server.py index 679d5f996e..9dc09d6f05 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -24,7 +24,6 @@ class Server: self.exit_flag = False self.port = int(port) self.conn_mgr = conn_mgr - tracer_interceptor = None self.tracer = tracer self.server_impl = grpc.server( diff --git a/tracing/__init__.py b/tracing/__init__.py index 3edddea9df..04975c4cfd 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,3 +1,15 @@ +from grpc_opentracing import SpanDecorator + +class GrpcSpanDecorator(SpanDecorator): + def __call__(self, span, rpc_info): + if rpc_info.response.status.error_code == 0: + return + span.set_tag('error', True) + error_log = {'event': 'error', + 'error.kind': str(rpc_info.response.status.error_code), + 'message': rpc_info.response.status.reason + } + span.log_kv(error_log) def empty_server_interceptor_decorator(target_server, interceptor): return target_server diff --git a/tracing/factory.py b/tracing/factory.py index f00a537e78..f692563e7b 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -3,7 +3,9 @@ from jaeger_client import Config from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor -from tracing import Tracer, empty_server_interceptor_decorator +from tracing import (Tracer, + GrpcSpanDecorator, + empty_server_interceptor_decorator) logger = logging.getLogger(__name__) @@ -32,7 +34,8 @@ class TracerFactory: tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor(tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD) + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=GrpcSpanDecorator()) return Tracer(tracer, tracer_interceptor, intercept_server) From 48f172facb6db3f27684fd8be4c8c3936cb6e148 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 11:38:31 +0800 Subject: [PATCH 037/196] refactor tracing --- mishards/__init__.py | 4 +++- mishards/grpc_utils/__init__.py | 21 +++++++++++++++++++++ mishards/settings.py | 17 ++++++++++++----- tracing/__init__.py | 13 ------------- tracing/factory.py | 17 +++-------------- 5 files changed, 39 insertions(+), 33 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 640293c265..c1cea84861 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -13,7 +13,9 @@ sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) from tracing.factory import TracerFactory -tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig) +from grpc_utils import GrpcSpanDecorator +tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) from mishards.server import Server grpc_server = Server(conn_mgr=connect_mgr, tracer=tracer) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index 959d5549c7..9ee7d22f37 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -1,3 +1,24 @@ +from grpc_opentracing import SpanDecorator +from milvus.grpc_gen import status_pb2 + + +class GrpcSpanDecorator(SpanDecorator): + def __call__(self, span, rpc_info): + status = None + if isinstance(rpc_info.response, status_pb2.Status): + status = rpc_info.response + else: + status = rpc_info.response.status + 
if status.error_code == 0: + return + span.set_tag('error', True) + span.set_tag('error_code', status.error_code) + error_log = {'event': 'error', + 'request': rpc_info.request, + 'response': rpc_info.response + } + span.log_kv(error_log) + def mark_grpc_method(func): setattr(func, 'grpc_method', True) return func diff --git a/mishards/settings.py b/mishards/settings.py index eb6e1e5964..4a70d44561 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -48,13 +48,20 @@ TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') TRACING_TYPE = env.str('TRACING_TYPE', '') class TracingConfig: - TRACING_LOGGING = env.bool('TRACING_LOGGING', True) TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) - TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', DEBUG) - TRACING_REPORTING_HOST = env.str('TRACING_REPORTING_HOST', '127.0.0.1') - TRACING_REPORTING_PORT = env.str('TRACING_REPORTING_PORT', '5775') - + TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False) + TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "1"), + }, + 'local_agent': { + 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), + 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') + }, + 'logging': env.bool('TRACING_LOGGING', True) + } if __name__ == '__main__': import logging diff --git a/tracing/__init__.py b/tracing/__init__.py index 04975c4cfd..0aebf6ffba 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,16 +1,3 @@ -from grpc_opentracing import SpanDecorator - -class GrpcSpanDecorator(SpanDecorator): - def __call__(self, span, rpc_info): - if rpc_info.response.status.error_code == 0: - return - span.set_tag('error', True) - error_log = {'event': 'error', - 'error.kind': str(rpc_info.response.status.error_code), - 'message': rpc_info.response.status.reason - } - span.log_kv(error_log) - def empty_server_interceptor_decorator(target_server, interceptor): return target_server diff --git a/tracing/factory.py b/tracing/factory.py index f692563e7b..fd06fe3cac 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -4,7 +4,6 @@ from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor from tracing import (Tracer, - GrpcSpanDecorator, empty_server_interceptor_decorator) logger = logging.getLogger(__name__) @@ -12,22 +11,12 @@ logger = logging.getLogger(__name__) class TracerFactory: @classmethod - def new_tracer(cls, tracer_type, tracer_config, **kwargs): + def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): if not tracer_type: return Tracer() if tracer_type.lower() == 'jaeger': - config = Config(config={ - 'sampler': { - 'type': 'const', - 'param': 1, - }, - 'local_agent': { - 'reporting_host': tracer_config.TRACING_REPORTING_HOST, - 'reporting_port': tracer_config.TRACING_REPORTING_PORT - }, - 'logging': tracer_config.TRACING_LOGGING, - }, + config = Config(config=tracer_config.TRACING_CONFIG, service_name=tracer_config.TRACING_SERVICE_NAME, validate=tracer_config.TRACING_VALIDATE ) @@ -35,7 +24,7 @@ class TracerFactory: tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor(tracer, log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=GrpcSpanDecorator()) + span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) From 
bdbb70f63f2c72e070b98330e3ced1d959d9c366 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 12:06:38 +0800 Subject: [PATCH 038/196] change grpc decorator --- mishards/grpc_utils/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index 9ee7d22f37..ba9a5e175d 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -5,18 +5,24 @@ from milvus.grpc_gen import status_pb2 class GrpcSpanDecorator(SpanDecorator): def __call__(self, span, rpc_info): status = None + if not rpc_info.response: + return if isinstance(rpc_info.response, status_pb2.Status): status = rpc_info.response else: - status = rpc_info.response.status + try: + status = rpc_info.response.status + except Exception as e: + status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, + reason='Should not happen') + if status.error_code == 0: return - span.set_tag('error', True) - span.set_tag('error_code', status.error_code) error_log = {'event': 'error', 'request': rpc_info.request, 'response': rpc_info.response } + span.set_tag('error', True) span.log_kv(error_log) def mark_grpc_method(func): From 11ba6beb40f2e6b9ef4351cbcffa1b4810b7e5d9 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 15:22:00 +0800 Subject: [PATCH 039/196] update for search error handling --- mishards/service_handler.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index f39ad3ef46..cb904f4e42 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -92,13 +92,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return routing def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): + status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") if not files_n_topk_results: - return [] + return status, [] request_results = defaultdict(list) calc_time = time.time() for files_collection in files_n_topk_results: + if isinstance(files_collection, tuple): + status, _ = files_collection + return status, [] for request_pos, each_request_results in enumerate(files_collection.topk_query_result): request_results[request_pos].extend(each_request_results.query_result_arrays) request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, @@ -114,7 +118,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_result = TopKQueryResult(query_result_arrays=result[1]) topk_query_result.append(query_result) - return topk_query_result + return status, topk_query_result def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): metadata = kwargs.get('metadata', None) @@ -273,14 +277,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_range_array.append( Range(query_range.start_value, query_range.end_value)) - results = self._do_query(table_name, table_meta, query_record_array, topk, + status, results = self._do_query(table_name, table_meta, query_record_array, topk, nprobe, query_range_array, metadata=metadata) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list = milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success"), + status=status, topk_query_result=results ) return topk_result_list From 110e56c1b7f20574db351eea6a3c3d812ad21fc3 Mon Sep 17 00:00:00 2001 From: "peng.xu" 
Date: Thu, 26 Sep 2019 18:34:02 +0800 Subject: [PATCH 040/196] add more child span for search --- mishards/server.py | 2 +- mishards/service_handler.py | 31 ++++++++++++++++++------------- tracing/__init__.py | 6 ++++++ 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/mishards/server.py b/mishards/server.py index 9dc09d6f05..876424089c 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -76,7 +76,7 @@ class Server: def start(self, port=None): handler_class = self.add_error_handlers(ServiceHandler) - add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr), self.server_impl) + add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr, tracer=self.tracer), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) self.server_impl.start() diff --git a/mishards/service_handler.py b/mishards/service_handler.py index cb904f4e42..72ae73932c 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -24,10 +24,11 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, *args, **kwargs): + def __init__(self, conn_mgr, tracer, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} self.error_handlers = {} + self.tracer = tracer def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) @@ -120,7 +121,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status, topk_query_result - def _do_query(self, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): + def _do_query(self, context, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): metadata = kwargs.get('metadata', None) range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) @@ -140,16 +141,18 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn = self.query_conn(addr, metadata=metadata) start = time.time() - ret = conn.search_vectors_in_files(table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy=True) - end = time.time() - logger.info('search_vectors_in_files takes: {}'.format(end - start)) + with self.tracer.start_span('search_{}_span'.format(addr), + child_of=context.get_active_span().context): + ret = conn.search_vectors_in_files(table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) + end = time.time() + logger.info('search_vectors_in_files takes: {}'.format(end - start)) - all_topk_results.append(ret) + all_topk_results.append(ret) with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): @@ -160,7 +163,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): res.result() reverse = table_meta.metric_type == types.MetricType.IP - return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + with self.tracer.start_span('do_merge', + child_of=context.get_active_span().context): + return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) @mark_grpc_method def CreateTable(self, request, context): @@ -277,7 +282,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_range_array.append( Range(query_range.start_value, 
query_range.end_value)) - status, results = self._do_query(table_name, table_meta, query_record_array, topk, + status, results = self._do_query(context, table_name, table_meta, query_record_array, topk, nprobe, query_range_array, metadata=metadata) now = time.time() diff --git a/tracing/__init__.py b/tracing/__init__.py index 0aebf6ffba..27c57473db 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -14,3 +14,9 @@ class Tracer: def close(self): self.tracer and self.tracer.close() + + def start_span(self, operation_name=None, + child_of=None, references=None, tags=None, + start_time=None, ignore_active_span=False): + return self.tracer.start_span(operation_name, child_of, + references, tags, start_time, ignore_active_span) From a6a1ff2f13dbdadb178ae91582a50b50df12e9a2 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 26 Sep 2019 19:23:15 +0800 Subject: [PATCH 041/196] add routing span --- mishards/service_handler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 72ae73932c..cafe4be60f 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -124,7 +124,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _do_query(self, context, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): metadata = kwargs.get('metadata', None) range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None - routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) + + routing = {} + with self.tracer.start_span('get_routing', + child_of=context.get_active_span().context): + routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) metadata = kwargs.get('metadata', None) From 81a78a40cb9647d78b59505997f0e02ba936e737 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 10:21:17 +0800 Subject: [PATCH 042/196] more detail tracing in search --- mishards/service_handler.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index cafe4be60f..ddff2903b8 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -145,7 +145,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn = self.query_conn(addr, metadata=metadata) start = time.time() - with self.tracer.start_span('search_{}_span'.format(addr), + span = kwargs.get('span', None) + span = span if span else context.get_active_span().context + with self.tracer.start_span('search_{}'.format(addr), child_of=context.get_active_span().context): ret = conn.search_vectors_in_files(table_name=query_params['table_id'], file_ids=query_params['file_ids'], @@ -158,13 +160,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): all_topk_results.append(ret) - with ThreadPoolExecutor(max_workers=workers) as pool: - for addr, params in routing.items(): - res = pool.submit(search, addr, params, vectors, topk, nprobe) - rs.append(res) + with self.tracer.start_span('do_search', + child_of=context.get_active_span().context) as span: + with ThreadPoolExecutor(max_workers=workers) as pool: + for addr, params in routing.items(): + res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) + rs.append(res) - for res in rs: - res.result() + for res in rs: + res.result() reverse = table_meta.metric_type == types.MetricType.IP with 
self.tracer.start_span('do_merge', From 98d49b803d76daf40a3bfc5c2f142ba29ddc0433 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 11:29:22 +0800 Subject: [PATCH 043/196] update for proto update --- mishards/exception_handlers.py | 5 +---- mishards/grpc_utils/grpc_args_parser.py | 5 +++-- mishards/service_handler.py | 30 ++++++++----------------- requirements.txt | 4 ++-- 4 files changed, 15 insertions(+), 29 deletions(-) diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index a2659f91af..16ba34a3b1 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -36,11 +36,8 @@ def resp_handler(err, error_code): return resp_class(status=status, string_reply='') if resp_class == milvus_pb2.TableSchema: - table_name = milvus_pb2.TableName( - status=status - ) return milvus_pb2.TableSchema( - table_name=table_name + status=status ) if resp_class == milvus_pb2.IndexParam: diff --git a/mishards/grpc_utils/grpc_args_parser.py b/mishards/grpc_utils/grpc_args_parser.py index c8dc9d71d9..039299803d 100644 --- a/mishards/grpc_utils/grpc_args_parser.py +++ b/mishards/grpc_utils/grpc_args_parser.py @@ -21,7 +21,8 @@ class GrpcArgsParser(object): @error_status def parse_proto_TableSchema(cls, param): _table_schema = { - 'table_name': param.table_name.table_name, + 'status': param.status, + 'table_name': param.table_name, 'dimension': param.dimension, 'index_file_size': param.index_file_size, 'metric_type': param.metric_type @@ -47,7 +48,7 @@ class GrpcArgsParser(object): @classmethod @error_status def parse_proto_IndexParam(cls, param): - _table_name = param.table_name.table_name + _table_name = param.table_name _status, _index = cls.parse_proto_Index(param.index) if not _status.OK(): diff --git a/mishards/service_handler.py b/mishards/service_handler.py index ddff2903b8..81217b52be 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -311,11 +311,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - table_name = milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) return milvus_pb2.TableSchema( - table_name=table_name + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) metadata = { @@ -326,22 +323,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table = self.connection(metadata=metadata).describe_table(_table_name) if _status.OK(): - _grpc_table_name = milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table.table_name - ) - return milvus_pb2.TableSchema( - table_name=_grpc_table_name, + table_name=_table_name, index_file_size=_table.index_file_size, dimension=_table.dimension, - metric_type=_table.metric_type + metric_type=_table.metric_type, + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) return milvus_pb2.TableSchema( - table_name=milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) + table_name=_table_name, + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) @mark_grpc_method @@ -398,14 +390,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } _status, _results = self.connection(metadata=metadata).show_tables() - if not _status.OK(): - _results = [] - - for _result in _results: - yield milvus_pb2.TableName( + return 
milvus_pb2.TableNameList( status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_result - ) + table_names=_results + ) @mark_grpc_method def DeleteByRange(self, request, context): diff --git a/requirements.txt b/requirements.txt index 03db7aeed3..e94f8d1597 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,8 +14,8 @@ py==1.8.0 pyasn1==0.4.7 pyasn1-modules==0.2.6 pylint==2.3.1 -#pymilvus-test==0.2.15 -pymilvus==0.2.0 +pymilvus-test==0.2.15 +#pymilvus==0.2.0 pyparsing==2.4.0 pytest==4.6.3 pytest-level==0.1.1 From 76581d0641f55907f0dd7d8a5b35b4f8b1175e11 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 11:39:24 +0800 Subject: [PATCH 044/196] update DecribeIndex for proto changes --- mishards/service_handler.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 81217b52be..60d64cef37 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -426,9 +426,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if not _status.OK(): return milvus_pb2.IndexParam( - table_name=milvus_pb2.TableName( status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) ) metadata = { @@ -439,11 +437,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _index_param = self.connection(metadata=metadata).describe_index(_table_name) _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) - _tablename = milvus_pb2.TableName( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table_name) - return milvus_pb2.IndexParam(table_name=_tablename, index=_index) + return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_table_name, index=_index) @mark_grpc_method def DropIndex(self, request, context): From 663f9a2312997fda9dad71135a49dd307b20898e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 27 Sep 2019 14:03:46 +0800 Subject: [PATCH 045/196] small refactor in server --- mishards/grpc_utils/__init__.py | 5 +++++ mishards/server.py | 14 +++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index ba9a5e175d..550913ed60 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -28,3 +28,8 @@ class GrpcSpanDecorator(SpanDecorator): def mark_grpc_method(func): setattr(func, 'grpc_method', True) return func + +def is_grpc_method(func): + if not func: + return False + return getattr(func, 'grpc_method', False) diff --git a/mishards/server.py b/mishards/server.py index 876424089c..1f72a8812d 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -10,6 +10,7 @@ from grpc._cython import cygrpc from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable from jaeger_client import Config from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server +from mishards.grpc_utils import is_grpc_method from mishards.service_handler import ServiceHandler from mishards import settings, discover @@ -75,7 +76,7 @@ class Server: discover.start() def start(self, port=None): - handler_class = self.add_error_handlers(ServiceHandler) + handler_class = self.decorate_handler(ServiceHandler) add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr, tracer=self.tracer), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format(str(port or 
self._port))) self.server_impl.start() @@ -102,9 +103,8 @@ class Server: self.tracer.close() logger.info('Server is closed') - def add_error_handlers(self, target): - for key, attr in target.__dict__.items(): - is_grpc_method = getattr(attr, 'grpc_method', False) - if is_grpc_method: - setattr(target, key, self.wrap_method_with_errorhandler(attr)) - return target + def decorate_handler(self, handler): + for key, attr in handler.__dict__.items(): + if is_grpc_method(attr): + setattr(handler, key, self.wrap_method_with_errorhandler(attr)) + return handler From 7220af2cd172ac6a4304b75f6f5e48d409671e70 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 11:01:52 +0800 Subject: [PATCH 046/196] refactor settings --- mishards/__init__.py | 39 +++++++++++++++++++++++---------------- mishards/main.py | 4 ++-- mishards/server.py | 9 ++++++--- mishards/settings.py | 12 +++++++++--- 4 files changed, 40 insertions(+), 24 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index c1cea84861..76f3168b51 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -2,22 +2,29 @@ from mishards import settings from mishards.db_base import DB db = DB() -db.init_db(uri=settings.SQLALCHEMY_DATABASE_URI, echo=settings.SQL_ECHO) - -from mishards.connections import ConnectionMgr -connect_mgr = ConnectionMgr() - -from sd import ProviderManager - -sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) -discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) - -from tracing.factory import TracerFactory -from grpc_utils import GrpcSpanDecorator -tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) from mishards.server import Server -grpc_server = Server(conn_mgr=connect_mgr, tracer=tracer) +grpc_server = Server() -from mishards import exception_handlers +def create_app(testing_config=None): + config = testing_config if testing_config else settings.DefaultConfig + db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + + from mishards.connections import ConnectionMgr + connect_mgr = ConnectionMgr() + + from sd import ProviderManager + + sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) + discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) + + from tracing.factory import TracerFactory + from grpc_utils import GrpcSpanDecorator + tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) + + grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) + + from mishards import exception_handlers + + return grpc_server diff --git a/mishards/main.py b/mishards/main.py index 7fac55dfa2..9197fbf598 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -2,10 +2,10 @@ import os, sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from mishards import ( - settings, - grpc_server as server) + settings, create_app) def main(): + server = create_app() server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 1f72a8812d..0ca4a8f866 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -12,20 +12,23 @@ from jaeger_client import Config from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.grpc_utils import is_grpc_method from mishards.service_handler import ServiceHandler -from mishards import settings, discover +from 
From 4051cf7e07b54d79c6303f8b0fb7f9311aadd850 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 11:22:46 +0800 Subject: [PATCH 047/196] update for testing config --- mishards/__init__.py | 4 ++++ mishards/db_base.py | 4 ++++ mishards/main.py | 2 +- mishards/server.py | 1 - mishards/settings.py | 8 +++++--- 5 files changed, 14 insertions(+), 5 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 76f3168b51..8682b6eba6 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -7,8 +7,12 @@ from mishards.server import Server grpc_server = Server() def create_app(testing_config=None): + import logging + logger = logging.getLogger() + config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + logger.info(db) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() diff --git a/mishards/db_base.py b/mishards/db_base.py index 3b2c699864..1006f21f55 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -24,6 +24,10 @@ class DB: self.uri = uri self.session = sessionmaker() self.session.configure(bind=self.engine) + self.url = url + + def __str__(self): + return '<DB: backend={};database={}>'.format(self.url.get_backend_name(), self.url.database) @property def Session(self): diff --git a/mishards/main.py b/mishards/main.py index 9197fbf598..5d8db0a179 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -5,7 +5,7 @@ from mishards import ( settings, create_app) def main(): - server = create_app() + server = create_app(settings.TestingConfig if settings.TESTING else settings.DefaultConfig) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 0ca4a8f866..c044bbb7ad 100644 --- a/mishards/server.py
+++ b/mishards/server.py @@ -90,7 +90,6 @@ class Server: self.on_pre_run() self.start(port) - logger.info('Successfully') logger.info('Listening on port {}'.format(port)) try: diff --git a/mishards/settings.py b/mishards/settings.py index b42cb791f6..71e94b76a2 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -64,9 +64,11 @@ class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) -# class TestingConfig(DefaultConfig): -# SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') -# SQL_ECHO = env.bool('SQL_TEST_ECHO', False) +TESTING = env.bool('TESTING', False) +if TESTING: + class TestingConfig(DefaultConfig): + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') + SQL_ECHO = env.bool('SQL_TEST_ECHO', False) if __name__ == '__main__': From 71231205659444422fcc505c4cd7d5cadae70aa7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 15:54:55 +0800 Subject: [PATCH 048/196] update db session and related factory impl --- mishards/__init__.py | 7 +++---- mishards/db_base.py | 13 ++++++++++--- mishards/factories.py | 4 ++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 8682b6eba6..b351986cba 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -1,4 +1,6 @@ +import logging from mishards import settings +logger = logging.getLogger() from mishards.db_base import DB db = DB() @@ -7,9 +9,6 @@ from mishards.server import Server grpc_server = Server() def create_app(testing_config=None): - import logging - logger = logging.getLogger() - config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) logger.info(db) @@ -23,7 +22,7 @@ def create_app(testing_config=None): discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) from tracing.factory import TracerFactory - from grpc_utils import GrpcSpanDecorator + from mishards.grpc_utils import GrpcSpanDecorator tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, span_decorator=GrpcSpanDecorator()) diff --git a/mishards/db_base.py b/mishards/db_base.py index 1006f21f55..b1492aa8f5 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -3,14 +3,23 @@ from sqlalchemy import create_engine from sqlalchemy.engine.url import make_url from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session +from sqlalchemy.orm.session import Session as SessionBase logger = logging.getLogger(__name__) + +class LocalSession(SessionBase): + def __init__(self, db, autocommit=False, autoflush=True, **options): + self.db = db + bind = options.pop('bind', None) or db.engine + SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + class DB: Model = declarative_base() def __init__(self, uri=None, echo=False): self.echo = echo uri and self.init_db(uri, echo) + self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) def init_db(self, uri, echo=False): url = make_url(uri) @@ -22,8 +31,6 @@ class DB: echo=echo, max_overflow=0) self.uri = uri - self.session = sessionmaker() - self.session.configure(bind=self.engine) self.url = url def __str__(self): @@ -31,7 +38,7 @@ class DB: @property def Session(self): - return self.session() + return self.session_factory() def drop_all(self): self.Model.metadata.drop_all(self.engine) diff --git 
a/mishards/factories.py b/mishards/factories.py index 5bd059654a..26e9ab2619 100644 --- a/mishards/factories.py +++ b/mishards/factories.py @@ -19,7 +19,7 @@ factory.Faker.add_provider(FakerProvider) class TablesFactory(SQLAlchemyModelFactory): class Meta: model = Tables - sqlalchemy_session = db.Session + sqlalchemy_session = db.session_factory sqlalchemy_session_persistence = 'commit' id = factory.Faker('random_number', digits=16, fix_len=True) @@ -35,7 +35,7 @@ class TablesFactory(SQLAlchemyModelFactory): class TableFilesFactory(SQLAlchemyModelFactory): class Meta: model = TableFiles - sqlalchemy_session = db.Session + sqlalchemy_session = db.session_factory sqlalchemy_session_persistence = 'commit' id = factory.Faker('random_number', digits=16, fix_len=True) From 13bad105e201172d6a072174ffb07ecddf326bfa Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 16:42:05 +0800 Subject: [PATCH 049/196] add unit test --- conftest.py | 22 +++++++++++++++++++++ mishards/test_connections.py | 0 mishards/test_models.py | 38 ++++++++++++++++++++++++++++++++++++ setup.cfg | 4 ++++ 4 files changed, 64 insertions(+) create mode 100644 conftest.py create mode 100644 mishards/test_connections.py create mode 100644 mishards/test_models.py create mode 100644 setup.cfg diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000000..630ff0ba31 --- /dev/null +++ b/conftest.py @@ -0,0 +1,22 @@ +import logging +import pytest +from mishards import settings, db, create_app + +logger = logging.getLogger(__name__) + +def clear_data(session): + meta = db.metadata + for table in reversed(meta.sorted_tables): + session.execute(table.delete()) + session.commit() + +# @pytest.fixture(scope="module") +@pytest.fixture +def app(request): + app = create_app(settings.TestingConfig) + db.drop_all() + db.create_all() + + yield app + + db.drop_all() diff --git a/mishards/test_connections.py b/mishards/test_connections.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/mishards/test_models.py b/mishards/test_models.py new file mode 100644 index 0000000000..85dcc246aa --- /dev/null +++ b/mishards/test_models.py @@ -0,0 +1,38 @@ +import logging +import pytest +from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory +from mishards import db, create_app, settings +from mishards.factories import ( + Tables, TableFiles, + TablesFactory, TableFilesFactory + ) + +logger = logging.getLogger(__name__) + +@pytest.mark.usefixtures('app') +class TestModels: + def test_files_to_search(self): + table = TablesFactory() + new_files_cnt = 5 + to_index_cnt = 10 + raw_cnt = 20 + backup_cnt = 12 + to_delete_cnt = 9 + index_cnt = 8 + new_index_cnt = 6 + new_merge_cnt = 11 + + new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110) + to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110) + raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120) + backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110) + index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110) + new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110) + new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, 
file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110) + to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110) + assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt + + assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt + assert table.files_to_search([(111, 120)]).count() == 0 + assert table.files_to_search([(111, 121)]).count() == raw_cnt + assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..4a88432914 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,4 @@ +[tool:pytest] +testpaths = mishards +log_cli=true +log_cli_level=info From dd38d54d647816516479782404a9c71805cf05b9 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 17:26:28 +0800 Subject: [PATCH 050/196] add connection tests --- conftest.py | 7 ---- mishards/connections.py | 10 ++--- mishards/test_connections.py | 73 ++++++++++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 12 deletions(-) diff --git a/conftest.py b/conftest.py index 630ff0ba31..c4fed5cc7e 100644 --- a/conftest.py +++ b/conftest.py @@ -4,13 +4,6 @@ from mishards import settings, db, create_app logger = logging.getLogger(__name__) -def clear_data(session): - meta = db.metadata - for table in reversed(meta.sorted_tables): - session.execute(table.delete()) - session.commit() - -# @pytest.fixture(scope="module") @pytest.fixture def app(request): app = create_app(settings.TestingConfig) diff --git a/mishards/connections.py b/mishards/connections.py index 35c5d6c3bd..caaf9629dd 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -35,7 +35,7 @@ class Connection: @property def can_retry(self): - return self.retried <= self.max_retry + return self.retried < self.max_retry @property def connected(self): @@ -45,7 +45,7 @@ class Connection: if self.on_retry_func: self.on_retry_func(self) else: - logger.warn('{} is retrying {}'.format(self, self.retried)) + logger.warning('{} is retrying {}'.format(self, self.retried)) def on_connect(self, metadata=None): while not self.connected and self.can_retry: @@ -123,11 +123,11 @@ class ConnectionMgr: return self.on_diff_meta(name, url) def on_same_meta(self, name, url): - # logger.warn('Register same meta: {}:{}'.format(name, url)) + # logger.warning('Register same meta: {}:{}'.format(name, url)) pass def on_diff_meta(self, name, url): - logger.warn('Received {} with diff url={}'.format(name, url)) + logger.warning('Received {} with diff url={}'.format(name, url)) self.metas[name] = url self.conns[name] = {} @@ -136,7 +136,7 @@ class ConnectionMgr: self.conns.pop(name, None) def on_nonexisted_meta(self, name): - logger.warn('Non-existed meta: {}'.format(name)) + logger.warning('Non-existed meta: {}'.format(name)) def register(self, name, url): logger.info('Register Connection: name={};url={}'.format(name, url)) diff --git a/mishards/test_connections.py b/mishards/test_connections.py index e69de29bb2..1f46b60f8b 100644 --- a/mishards/test_connections.py +++ b/mishards/test_connections.py @@ -0,0 +1,73 @@ +import logging +import pytest + +from mishards.connections import (ConnectionMgr, Connection) +from mishards import exceptions + +logger = logging.getLogger(__name__) + +@pytest.mark.usefixtures('app') +class TestConnection: + def test_manager(self): + mgr = ConnectionMgr() + + mgr.register('pod1', '111') + mgr.register('pod2', '222') + mgr.register('pod2', '222') + 
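+        # re-registering an unchanged name/URL pair is a no-op (on_same_meta),
+        # while the new URL below goes through on_diff_meta, which replaces the
+        # stored meta and resets that pod's cached connections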
mgr.register('pod2', '2222') + assert len(mgr.conn_names) == 2 + + mgr.unregister('pod1') + assert len(mgr.conn_names) == 1 + + mgr.unregister('pod2') + assert len(mgr.conn_names) == 0 + + mgr.register('WOSERVER', 'xxxx') + assert len(mgr.conn_names) == 0 + + def test_connection(self): + class Conn: + def __init__(self, state): + self.state = state + def connect(self, uri): + return self.state + def connected(self): + return self.state + FAIL_CONN = Conn(False) + PASS_CONN = Conn(True) + + class Retry: + def __init__(self): + self.times = 0 + + def __call__(self, conn): + self.times += 1 + logger.info('Retrying {}'.format(self.times)) + + class Func(): + def __init__(self): + self.executed = False + def __call__(self): + self.executed = True + + max_retry = 3 + + RetryObj = Retry() + c = Connection('client', uri='', + max_retry=max_retry, + on_retry_func=RetryObj) + c.conn = FAIL_CONN + ff = Func() + this_connect = c.connect(func=ff) + with pytest.raises(exceptions.ConnectionConnectError): + this_connect() + assert RetryObj.times == max_retry + assert not ff.executed + RetryObj = Retry() + + c.conn = PASS_CONN + this_connect = c.connect(func=ff) + this_connect() + assert ff.executed + assert RetryObj.times == 0 From 7d1590c691a8aa518290614de6f9df2ca3af21af Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 17:27:33 +0800 Subject: [PATCH 051/196] remove dummy code --- mishards/connections.py | 56 ----------------------------------------- 1 file changed, 56 deletions(-) diff --git a/mishards/connections.py b/mishards/connections.py index caaf9629dd..22524c3a20 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -152,59 +152,3 @@ class ConnectionMgr: if url is None: return self.on_nonexisted_meta(name) return self.on_unregister_meta(name, url) - - -if __name__ == '__main__': - class Conn: - def __init__(self, state): - self.state = state - - def connect(self, uri): - return self.state - - def connected(self): - return self.state - - fail_conn = Conn(False) - success_conn = Conn(True) - - class Retry: - def __init__(self): - self.times = 0 - - def __call__(self, conn): - self.times += 1 - print('Retrying {}'.format(self.times)) - - - retry_obj = Retry() - c = Connection('client', uri='', on_retry_func=retry_obj) - - def f(): - print('ffffffff') - - # c.conn = fail_conn - # m = c.connect(func=f) - # m() - - c.conn = success_conn - m = c.connect(func=f) - m() - - mgr = ConnectionMgr() - mgr.register('pod1', '111') - mgr.register('pod2', '222') - mgr.register('pod2', '222') - mgr.register('pod2', 'tcp://127.0.0.1:19530') - - pod3 = mgr.conn('pod3') - print(pod3) - - pod2 = mgr.conn('pod2') - print(pod2) - print(pod2.connected) - - mgr.unregister('pod1') - - logger.info(mgr.metas) - logger.info(mgr.conns) From 6d25b23e39e8233b18ec2ac95371aa3abb0f4716 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 28 Sep 2019 18:21:33 +0800 Subject: [PATCH 052/196] update env example --- mishards/.env.example | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index 22406c7f34..76b1810759 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -1,8 +1,10 @@ -DEBUG=False +DEBUG=True WOSERVER=tcp://127.0.0.1:19530 TESTING_WOSERVER=tcp://127.0.0.1:19530 -SERVER_PORT=19531 +SERVER_PORT=19532 + +SD_PROVIDER=Static SD_NAMESPACE=xp SD_IN_CLUSTER=False @@ -10,5 +12,21 @@ SD_POLL_INTERVAL=5 SD_ROSERVER_POD_PATT=.*-ro-servers-.* SD_LABEL_SELECTOR=tier=ro-servers +SD_STATIC_HOSTS=127.0.0.1 + 
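+# NOTE: SD_STATIC_HOSTS is read via env.list(), so multiple read-only servers
+# may be listed comma-separated, e.g. SD_STATIC_HOSTS=10.0.0.1,10.0.0.2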
SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +#SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True + +TESTING=True +#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_TEST_ECHO=False + +TRACING_TYPE=jaeger +TRACING_SERVICE_NAME=fortest +TRACING_SAMPLER_TYPE=const +TRACING_SAMPLER_PARAM=1 +TRACING_LOG_PAYLOAD=True +#TRACING_SAMPLER_TYPE=probabilistic +#TRACING_SAMPLER_PARAM=0.5 From 498f3e9c8c89a916a8af44f491ccffb8ccd5a068 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 11:20:45 +0800 Subject: [PATCH 053/196] load env example by default --- mishards/settings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mishards/settings.py b/mishards/settings.py index 71e94b76a2..f5028cbbc7 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -1,10 +1,12 @@ import sys import os -from environs import Env +from dotenv import load_dotenv +load_dotenv('./mishards/.env.example') +from environs import Env env = Env() -env.read_env() +env.read_env(override=True) DEBUG = env.bool('DEBUG', False) From bef93edab9921f04d15747a7e245f8649597e4a7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 11:29:41 +0800 Subject: [PATCH 054/196] update default sql url --- mishards/.env.example | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index 76b1810759..47a4549f04 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -14,8 +14,8 @@ SD_LABEL_SELECTOR=tier=ro-servers SD_STATIC_HOSTS=127.0.0.1 -SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 -#SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True TESTING=True From 71c67f59a3b1d348c0e27c49a642bf64b0227a5a Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 13:42:12 +0800 Subject: [PATCH 055/196] update for code style --- conftest.py | 1 + manager.py | 7 ++- mishards/__init__.py | 5 +- mishards/connections.py | 10 ++-- mishards/db_base.py | 8 ++- mishards/exception_handlers.py | 5 ++ mishards/exceptions.py | 8 +++ mishards/factories.py | 18 +++--- mishards/grpc_utils/__init__.py | 10 ++-- mishards/grpc_utils/grpc_args_wrapper.py | 4 +- mishards/hash_ring.py | 28 +++++----- mishards/main.py | 11 ++-- mishards/models.py | 15 ++--- mishards/server.py | 4 +- mishards/service_handler.py | 64 ++++++++++----------- mishards/settings.py | 10 +++- mishards/test_connections.py | 8 ++- mishards/test_models.py | 7 ++- sd/__init__.py | 1 + sd/kubernetes_provider.py | 71 +++++++++++++----------- sd/static_provider.py | 6 +- tracing/__init__.py | 13 +++-- tracing/factory.py | 12 ++-- utils/__init__.py | 1 + utils/logger_helper.py | 17 ++++-- 25 files changed, 201 insertions(+), 143 deletions(-) diff --git a/conftest.py b/conftest.py index c4fed5cc7e..d6c9f3acc7 100644 --- a/conftest.py +++ b/conftest.py @@ -4,6 +4,7 @@ from mishards import settings, db, create_app logger = logging.getLogger(__name__) + @pytest.fixture def app(request): app = create_app(settings.TestingConfig) diff --git a/manager.py b/manager.py index 31f5894d2d..931c90ebc8 100644 --- 
a/manager.py +++ b/manager.py @@ -2,6 +2,7 @@ import fire from mishards import db from sqlalchemy import and_ + class DBHandler: @classmethod def create_all(cls): @@ -15,9 +16,9 @@ class DBHandler: def fun(cls, tid): from mishards.factories import TablesFactory, TableFilesFactory, Tables f = db.Session.query(Tables).filter(and_( - Tables.table_id==tid, - Tables.state!=Tables.TO_DELETE) - ).first() + Tables.table_id == tid, + Tables.state != Tables.TO_DELETE) + ).first() print(f) # f1 = TableFilesFactory() diff --git a/mishards/__init__.py b/mishards/__init__.py index b351986cba..47d8adb6e3 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -1,4 +1,4 @@ -import logging +import logging from mishards import settings logger = logging.getLogger() @@ -8,6 +8,7 @@ db = DB() from mishards.server import Server grpc_server = Server() + def create_app(testing_config=None): config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) @@ -24,7 +25,7 @@ def create_app(testing_config=None): from tracing.factory import TracerFactory from mishards.grpc_utils import GrpcSpanDecorator tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) + span_decorator=GrpcSpanDecorator()) grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) diff --git a/mishards/connections.py b/mishards/connections.py index 22524c3a20..ccd8e7e81b 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -10,6 +10,7 @@ from utils import singleton logger = logging.getLogger(__name__) + class Connection: def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): self.name = name @@ -55,7 +56,7 @@ class Connection: if not self.can_retry and not self.connected: raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry, - metadata=metadata)) + metadata=metadata)) self.retried = 0 @@ -72,6 +73,7 @@ class Connection: raise e return inner + @singleton class ConnectionMgr: def __init__(self): @@ -90,10 +92,10 @@ class ConnectionMgr: if not throw: return None raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), - metadata=metadata) + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) threaded = { - threading.get_ident() : this_conn + threading.get_ident(): this_conn } self.conns[name] = threaded return this_conn @@ -106,7 +108,7 @@ class ConnectionMgr: if not throw: return None raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), - metadata=metadata) + metadata=metadata) this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) c[tid] = this_conn return this_conn diff --git a/mishards/db_base.py b/mishards/db_base.py index b1492aa8f5..6fb3aef4e1 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -14,8 +14,10 @@ class LocalSession(SessionBase): bind = options.pop('bind', None) or db.engine SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + class DB: Model = declarative_base() + def __init__(self, uri=None, echo=False): self.echo = echo uri and self.init_db(uri, echo) @@ -27,9 +29,9 @@ class DB: self.engine = create_engine(url) else: self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, - pool_pre_ping=True, - echo=echo, - max_overflow=0) + pool_pre_ping=True, + echo=echo, + max_overflow=0) self.uri = uri 
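        # pool_pre_ping revalidates each pooled connection on checkout and
        # max_overflow=0 caps the pool at pool_size, so dropped MySQL handles
        # are replaced instead of accumulating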
self.url = url diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 16ba34a3b1..1e5ffb3529 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -4,6 +4,7 @@ from mishards import grpc_server as server, exceptions logger = logging.getLogger(__name__) + def resp_handler(err, error_code): if not isinstance(err, exceptions.BaseException): return status_pb2.Status(error_code=error_code, reason=str(err)) @@ -50,21 +51,25 @@ def resp_handler(err, error_code): status.error_code = status_pb2.UNEXPECTED_ERROR return status + @server.errorhandler(exceptions.TableNotFoundError) def TableNotFoundErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) + @server.errorhandler(exceptions.InvalidArgumentError) def InvalidArgumentErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) + @server.errorhandler(exceptions.DBError) def DBErrorHandler(err): logger.error(err) return resp_handler(err, status_pb2.UNEXPECTED_ERROR) + @server.errorhandler(exceptions.InvalidRangeError) def InvalidArgumentErrorHandler(err): logger.error(err) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index 2aa2b39eb9..acd9372d6a 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -1,26 +1,34 @@ import mishards.exception_codes as codes + class BaseException(Exception): code = codes.INVALID_CODE message = 'BaseException' + def __init__(self, message='', metadata=None): self.message = self.__class__.__name__ if not message else message self.metadata = metadata + class ConnectionConnectError(BaseException): code = codes.CONNECT_ERROR_CODE + class ConnectionNotFoundError(BaseException): code = codes.CONNECTTION_NOT_FOUND_CODE + class DBError(BaseException): code = codes.DB_ERROR_CODE + class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE + class InvalidArgumentError(BaseException): code = codes.INVALID_ARGUMENT_CODE + class InvalidRangeError(BaseException): code = codes.INVALID_DATE_RANGE_CODE diff --git a/mishards/factories.py b/mishards/factories.py index 26e9ab2619..c4037fe2d7 100644 --- a/mishards/factories.py +++ b/mishards/factories.py @@ -9,13 +9,16 @@ from faker.providers import BaseProvider from mishards import db from mishards.models import Tables, TableFiles + class FakerProvider(BaseProvider): def this_date(self): t = datetime.datetime.today() - return (t.year - 1900) * 10000 + (t.month-1)*100 + t.day + return (t.year - 1900) * 10000 + (t.month - 1) * 100 + t.day + factory.Faker.add_provider(FakerProvider) + class TablesFactory(SQLAlchemyModelFactory): class Meta: model = Tables @@ -24,14 +27,15 @@ class TablesFactory(SQLAlchemyModelFactory): id = factory.Faker('random_number', digits=16, fix_len=True) table_id = factory.Faker('uuid4') - state = factory.Faker('random_element', elements=(0,1,2,3)) - dimension = factory.Faker('random_element', elements=(256,512)) + state = factory.Faker('random_element', elements=(0, 1, 2, 3)) + dimension = factory.Faker('random_element', elements=(256, 512)) created_on = int(time.time()) index_file_size = 0 - engine_type = factory.Faker('random_element', elements=(0,1,2,3)) - metric_type = factory.Faker('random_element', elements=(0,1)) + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) + metric_type = factory.Faker('random_element', elements=(0, 1)) nlist = 16384 + class TableFilesFactory(SQLAlchemyModelFactory): class Meta: model = TableFiles @@ -40,9 +44,9 @@ class 
TableFilesFactory(SQLAlchemyModelFactory): id = factory.Faker('random_number', digits=16, fix_len=True) table = factory.SubFactory(TablesFactory) - engine_type = factory.Faker('random_element', elements=(0,1,2,3)) + engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) file_id = factory.Faker('uuid4') - file_type = factory.Faker('random_element', elements=(0,1,2,3,4)) + file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4)) file_size = factory.Faker('random_number') updated_time = int(time.time()) created_on = int(time.time()) diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py index 550913ed60..f5225b2a66 100644 --- a/mishards/grpc_utils/__init__.py +++ b/mishards/grpc_utils/__init__.py @@ -14,21 +14,23 @@ class GrpcSpanDecorator(SpanDecorator): status = rpc_info.response.status except Exception as e: status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, - reason='Should not happen') + reason='Should not happen') if status.error_code == 0: return error_log = {'event': 'error', - 'request': rpc_info.request, - 'response': rpc_info.response - } + 'request': rpc_info.request, + 'response': rpc_info.response + } span.set_tag('error', True) span.log_kv(error_log) + def mark_grpc_method(func): setattr(func, 'grpc_method', True) return func + def is_grpc_method(func): if not func: return False diff --git a/mishards/grpc_utils/grpc_args_wrapper.py b/mishards/grpc_utils/grpc_args_wrapper.py index a864b1e400..7447dbd995 100644 --- a/mishards/grpc_utils/grpc_args_wrapper.py +++ b/mishards/grpc_utils/grpc_args_wrapper.py @@ -1,4 +1,4 @@ # class GrpcArgsWrapper(object): - # @classmethod - # def proto_TableName(cls): \ No newline at end of file +# @classmethod +# def proto_TableName(cls): diff --git a/mishards/hash_ring.py b/mishards/hash_ring.py index bfec108c5c..a97f3f580e 100644 --- a/mishards/hash_ring.py +++ b/mishards/hash_ring.py @@ -9,8 +9,8 @@ else: import md5 md5_constructor = md5.new -class HashRing(object): +class HashRing(object): def __init__(self, nodes=None, weights=None): """`nodes` is a list of objects that have a proper __str__ representation. `weights` is dictionary that sets weights to the nodes. 
The default @@ -40,13 +40,13 @@ class HashRing(object): if node in self.weights: weight = self.weights.get(node) - factor = math.floor((40*len(self.nodes)*weight) / total_weight); + factor = math.floor((40 * len(self.nodes) * weight) / total_weight) for j in range(0, int(factor)): - b_key = self._hash_digest( '%s-%s' % (node, j) ) + b_key = self._hash_digest('%s-%s' % (node, j)) for i in range(0, 3): - key = self._hash_val(b_key, lambda x: x+i*4) + key = self._hash_val(b_key, lambda x: x + i * 4) self.ring[key] = node self._sorted_keys.append(key) @@ -60,7 +60,7 @@ class HashRing(object): pos = self.get_node_pos(string_key) if pos is None: return None - return self.ring[ self._sorted_keys[pos] ] + return self.ring[self._sorted_keys[pos]] def get_node_pos(self, string_key): """Given a string key a corresponding node in the hash ring is returned @@ -94,6 +94,7 @@ class HashRing(object): yield None, None returned_values = set() + def distinct_filter(value): if str(value) not in returned_values: returned_values.add(str(value)) @@ -121,10 +122,8 @@ class HashRing(object): return self._hash_val(b_key, lambda x: x) def _hash_val(self, b_key, entry_fn): - return (( b_key[entry_fn(3)] << 24) - |(b_key[entry_fn(2)] << 16) - |(b_key[entry_fn(1)] << 8) - | b_key[entry_fn(0)] ) + return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( + b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] def _hash_digest(self, key): m = md5_constructor() @@ -132,12 +131,13 @@ class HashRing(object): m.update(key) return m.digest() + if __name__ == '__main__': from collections import defaultdict - servers = ['192.168.0.246:11212', - '192.168.0.247:11212', - '192.168.0.248:11212', - '192.168.0.249:11212'] + servers = [ + '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', + '192.168.0.249:11212' + ] ring = HashRing(servers) keys = ['{}'.format(i) for i in range(100)] @@ -146,5 +146,5 @@ if __name__ == '__main__': server = ring.get_node(k) mapped[server].append(k) - for k,v in mapped.items(): + for k, v in mapped.items(): print(k, v) diff --git a/mishards/main.py b/mishards/main.py index 5d8db0a179..3f69484ee4 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -1,13 +1,16 @@ -import os, sys +import os +import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from mishards import ( - settings, create_app) +from mishards import (settings, create_app) + def main(): - server = create_app(settings.TestingConfig if settings.TESTING else settings.DefaultConfig) + server = create_app( + settings.TestingConfig if settings.TESTING else settings.DefaultConfig) server.run(port=settings.SERVER_PORT) return 0 + if __name__ == '__main__': sys.exit(main()) diff --git a/mishards/models.py b/mishards/models.py index 0f7bb603ae..54cf5f8ed9 100644 --- a/mishards/models.py +++ b/mishards/models.py @@ -1,13 +1,14 @@ import logging from sqlalchemy import (Integer, Boolean, Text, - String, BigInteger, func, and_, or_, - Column) + String, BigInteger, func, and_, or_, + Column) from sqlalchemy.orm import relationship, backref from mishards import db logger = logging.getLogger(__name__) + class TableFiles(db.Model): FILE_TYPE_NEW = 0 FILE_TYPE_RAW = 1 @@ -57,16 +58,16 @@ class Tables(db.Model): def files_to_search(self, date_range=None): cond = or_( - TableFiles.file_type==TableFiles.FILE_TYPE_RAW, - TableFiles.file_type==TableFiles.FILE_TYPE_TO_INDEX, - TableFiles.file_type==TableFiles.FILE_TYPE_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_RAW, + TableFiles.file_type == 
TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, ) if date_range: cond = and_( cond, or_( - and_(TableFiles.date>=d[0], TableFiles.date<d[1]) for d in date_range + and_(TableFiles.date >= d[0], TableFiles.date < d[1]) for d in date_range + ) ) files = self.files.filter(cond) diff --git a/mishards/server.py b/mishards/server.py index c044bbb7ad..032d101cba 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -33,7 +33,7 @@ class Server: self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), - (cygrpc.ChannelArgKey.max_receive_message_length, -1)] + (cygrpc.ChannelArgKey.max_receive_message_length, -1)] ) self.server_impl = self.tracer.decorate(self.server_impl) @@ -46,7 +46,7 @@ class Server: ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) self.conn_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) + '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) def register_pre_run_handler(self, func): logger.info('Regiterring {} into server pre_run_handlers'.format(func)) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 60d64cef37..2a1e0eef02 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -11,7 +11,7 @@ from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult from milvus.client.Abstract import Range -from milvus.client import types +from milvus.client import types as Types from mishards import (db, settings, exceptions) from mishards.grpc_utils import mark_grpc_method @@ -24,6 +24,7 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 + def __init__(self, conn_mgr, tracer, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} @@ -44,8 +45,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return conn.conn def _format_date(self, start, end): - return ((start.year-1900)*10000 + (start.month-1)*100 + start.day , (end.year-1900)*10000 + (end.month-1)*100 + end.day) + return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) def _range_to_date(self, range_obj, metadata=None): try: @@ -54,8 +54,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): assert start < end except (ValueError, AssertionError): raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( - range_obj.start_date, range_obj.end_date - ), metadata=metadata) + range_obj.start_date, range_obj.end_date + ), metadata=metadata) return self._format_date(start, end) @@ -63,9 +63,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): # PXU TODO: Implement Thread-local Context try: table = db.Session.query(Tables).filter(and_( - Tables.table_id==table_id, - Tables.state!=Tables.TO_DELETE - )).first() + Tables.table_id == table_id, + Tables.state != Tables.TO_DELETE + )).first() except sqlalchemy_exc.SQLAlchemyError as e: raise exceptions.DBError(message=str(e), metadata=metadata) @@ -93,7 +93,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return routing def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): - status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") + status = status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") if not
files_n_topk_results: return status, [] @@ -107,7 +107,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for request_pos, each_request_results in enumerate(files_collection.topk_query_result): request_results[request_pos].extend(each_request_results.query_result_arrays) request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, - reverse=reverse)[:topk] + reverse=reverse)[:topk] calc_time = time.time() - calc_time logger.info('Merge takes {}'.format(calc_time)) @@ -127,7 +127,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): routing = {} with self.tracer.start_span('get_routing', - child_of=context.get_active_span().context): + child_of=context.get_active_span().context): routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) @@ -140,28 +140,28 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def search(addr, query_params, vectors, topk, nprobe, **kwargs): logger.info('Send Search Request: addr={};params={};nq={};topk={};nprobe={}'.format( - addr, query_params, len(vectors), topk, nprobe - )) + addr, query_params, len(vectors), topk, nprobe + )) conn = self.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) span = span if span else context.get_active_span().context with self.tracer.start_span('search_{}'.format(addr), - child_of=context.get_active_span().context): + child_of=context.get_active_span().context): ret = conn.search_vectors_in_files(table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy=True) + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) end = time.time() logger.info('search_vectors_in_files takes: {}'.format(end - start)) all_topk_results.append(ret) with self.tracer.start_span('do_search', - child_of=context.get_active_span().context) as span: + child_of=context.get_active_span().context) as span: with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) @@ -170,9 +170,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): for res in rs: res.result() - reverse = table_meta.metric_type == types.MetricType.IP + reverse = table_meta.metric_type == Types.MetricType.IP with self.tracer.start_span('do_merge', - child_of=context.get_active_span().context): + child_of=context.get_active_span().context): return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) @mark_grpc_method @@ -201,8 +201,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) _bool = self.connection(metadata={ - 'resp_class': milvus_pb2.BoolReply - }).has_table(_table_name) + 'resp_class': milvus_pb2.BoolReply + }).has_table(_table_name) return milvus_pb2.BoolReply( status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), @@ -244,7 +244,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' _status, _ids = self.connection(metadata={ 'resp_class': milvus_pb2.VectorIds - }).add_vectors(None, None, insert_param=request) + }).add_vectors(None, None, insert_param=request) return milvus_pb2.VectorIds( status=status_pb2.Status(error_code=_status.code, reason=_status.message), vector_id_array=_ids @@ 
-266,7 +266,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if nprobe > self.MAX_NPROBE or nprobe <= 0: raise exceptions.InvalidArgumentError(message='Invalid nprobe: {}'.format(nprobe), - metadata=metadata) + metadata=metadata) table_meta = self.table_meta.get(table_name, None) @@ -332,8 +332,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) return milvus_pb2.TableSchema( - table_name=_table_name, - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_name=_table_name, + status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) @mark_grpc_method @@ -391,8 +391,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _results = self.connection(metadata=metadata).show_tables() return milvus_pb2.TableNameList( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_names=_results + status=status_pb2.Status(error_code=_status.code, reason=_status.message), + table_names=_results ) @mark_grpc_method @@ -426,7 +426,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if not _status.OK(): return milvus_pb2.IndexParam( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) + status=status_pb2.Status(error_code=_status.code, reason=_status.message) ) metadata = { @@ -439,7 +439,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table_name, index=_index) + table_name=_table_name, index=_index) @mark_grpc_method def DropIndex(self, request, context): diff --git a/mishards/settings.py b/mishards/settings.py index f5028cbbc7..4563538a08 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -39,13 +39,15 @@ if SD_PROVIDER == 'Kubernetes': elif SD_PROVIDER == 'Static': from sd.static_provider import StaticProviderSettings SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []) - ) + hosts=env.list('SD_STATIC_HOSTS', []) + ) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') TRACING_TYPE = env.str('TRACING_TYPE', '') + + class TracingConfig: TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) @@ -54,7 +56,7 @@ class TracingConfig: 'sampler': { 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), 'param': env.str('TRACING_SAMPLER_PARAM', "1"), - }, + }, 'local_agent': { 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') @@ -62,10 +64,12 @@ class TracingConfig: 'logging': env.bool('TRACING_LOGGING', True) } + class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) + TESTING = env.bool('TESTING', False) if TESTING: class TestingConfig(DefaultConfig): diff --git a/mishards/test_connections.py b/mishards/test_connections.py index 1f46b60f8b..f1c54f0c61 100644 --- a/mishards/test_connections.py +++ b/mishards/test_connections.py @@ -6,6 +6,7 @@ from mishards import exceptions logger = logging.getLogger(__name__) + @pytest.mark.usefixtures('app') class TestConnection: def test_manager(self): @@ -30,8 +31,10 @@ class TestConnection: class Conn: def __init__(self, state): self.state = state + def connect(self, uri): return 
self.state + def connected(self): return self.state FAIL_CONN = Conn(False) @@ -48,6 +51,7 @@ class TestConnection: class Func(): def __init__(self): self.executed = False + def __call__(self): self.executed = True @@ -55,8 +59,8 @@ class TestConnection: RetryObj = Retry() c = Connection('client', uri='', - max_retry=max_retry, - on_retry_func=RetryObj) + max_retry=max_retry, + on_retry_func=RetryObj) c.conn = FAIL_CONN ff = Func() this_connect = c.connect(func=ff) diff --git a/mishards/test_models.py b/mishards/test_models.py index 85dcc246aa..d60b62713e 100644 --- a/mishards/test_models.py +++ b/mishards/test_models.py @@ -3,12 +3,13 @@ import pytest from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory from mishards import db, create_app, settings from mishards.factories import ( - Tables, TableFiles, - TablesFactory, TableFilesFactory - ) + Tables, TableFiles, + TablesFactory, TableFilesFactory +) logger = logging.getLogger(__name__) + @pytest.mark.usefixtures('app') class TestModels: def test_files_to_search(self): diff --git a/sd/__init__.py b/sd/__init__.py index 6dfba5ddc1..7943887d0f 100644 --- a/sd/__init__.py +++ b/sd/__init__.py @@ -24,4 +24,5 @@ class ProviderManager: def get_provider(cls, name): return cls.PROVIDERS.get(name, None) + from sd import kubernetes_provider, static_provider diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 51665a0cb5..924f1fc8a4 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -1,4 +1,5 @@ -import os, sys +import os +import sys if __name__ == '__main__': sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -71,7 +72,6 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): self.queue.put(event_message) - except Exception as exc: logger.error(exc) @@ -98,18 +98,18 @@ class K8SEventListener(threading.Thread, K8SMixin): resource_version = '' w = watch.Watch() for event in w.stream(self.v1.list_namespaced_event, namespace=self.namespace, - field_selector='involvedObject.kind=Pod'): + field_selector='involvedObject.kind=Pod'): if self.terminate: break resource_version = int(event['object'].metadata.resource_version) info = dict( - eType='WatchEvent', - pod=event['object'].involved_object.name, - reason=event['object'].reason, - message=event['object'].message, - start_up=self.at_start_up, + eType='WatchEvent', + pod=event['object'].involved_object.name, + reason=event['object'].reason, + message=event['object'].message, + start_up=self.at_start_up, ) self.at_start_up = False # logger.info('Received event: {}'.format(info)) @@ -135,7 +135,7 @@ class EventHandler(threading.Thread): def on_pod_started(self, event, **kwargs): try_cnt = 3 pod = None - while try_cnt > 0: + while try_cnt > 0: try_cnt -= 1 try: pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], namespace=self.namespace) @@ -203,6 +203,7 @@ class EventHandler(threading.Thread): except queue.Empty: continue + class KubernetesProviderSettings: def __init__(self, namespace, pod_patt, label_selector, in_cluster, poll_interval, **kwargs): self.namespace = namespace @@ -211,10 +212,12 @@ class KubernetesProviderSettings: self.in_cluster = in_cluster self.poll_interval = poll_interval + @singleton @ProviderManager.register_service_provider class KubernetesProvider(object): NAME = 'Kubernetes' + def __init__(self, settings, conn_mgr, **kwargs): self.namespace = settings.namespace self.pod_patt = settings.pod_patt @@ -233,27 +236,27 @@ class KubernetesProvider(object): self.v1 = 
client.CoreV1Api() self.listener = K8SEventListener( - message_queue=self.queue, - namespace=self.namespace, - in_cluster=self.in_cluster, - v1=self.v1, - **kwargs - ) + message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs + ) self.pod_heartbeater = K8SHeartbeatHandler( - message_queue=self.queue, - namespace=self.namespace, - label_selector=self.label_selector, - in_cluster=self.in_cluster, - v1=self.v1, - poll_interval=self.poll_interval, - **kwargs - ) + message_queue=self.queue, + namespace=self.namespace, + label_selector=self.label_selector, + in_cluster=self.in_cluster, + v1=self.v1, + poll_interval=self.poll_interval, + **kwargs + ) self.event_handler = EventHandler(mgr=self, - message_queue=self.queue, - namespace=self.namespace, - pod_patt=self.pod_patt, **kwargs) + message_queue=self.queue, + namespace=self.namespace, + pod_patt=self.pod_patt, **kwargs) def add_pod(self, name, ip): self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) @@ -276,9 +279,11 @@ class KubernetesProvider(object): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) + class Connect: def register(self, name, value): logger.error('Register: {} - {}'.format(name, value)) + def unregister(self, name): logger.error('Unregister: {}'.format(name)) @@ -289,16 +294,16 @@ if __name__ == '__main__': connect_mgr = Connect() settings = KubernetesProviderSettings( - namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) + namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) provider_class = ProviderManager.get_provider('Kubernetes') t = provider_class(conn_mgr=connect_mgr, - settings=settings - ) + settings=settings + ) t.start() cnt = 100 while cnt > 0: diff --git a/sd/static_provider.py b/sd/static_provider.py index 423d6c4d60..5c97c4efd0 100644 --- a/sd/static_provider.py +++ b/sd/static_provider.py @@ -1,4 +1,5 @@ -import os, sys +import os +import sys if __name__ == '__main__': sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -6,14 +7,17 @@ import socket from utils import singleton from sd import ProviderManager + class StaticProviderSettings: def __init__(self, hosts): self.hosts = hosts + @singleton @ProviderManager.register_service_provider class KubernetesProvider(object): NAME = 'Static' + def __init__(self, settings, conn_mgr, **kwargs): self.conn_mgr = conn_mgr self.hosts = [socket.gethostbyname(host) for host in settings.hosts] diff --git a/tracing/__init__.py b/tracing/__init__.py index 27c57473db..5014309a52 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,13 +1,14 @@ def empty_server_interceptor_decorator(target_server, interceptor): return target_server + class Tracer: def __init__(self, tracer=None, - interceptor=None, - server_decorator=empty_server_interceptor_decorator): + interceptor=None, + server_decorator=empty_server_interceptor_decorator): self.tracer = tracer self.interceptor = interceptor - self.server_decorator=server_decorator + self.server_decorator = server_decorator def decorate(self, server): return self.server_decorator(server, self.interceptor) @@ -16,7 +17,7 @@ class Tracer: self.tracer and self.tracer.close() def start_span(self, operation_name=None, - child_of=None, references=None, tags=None, - start_time=None, ignore_active_span=False): + child_of=None, references=None, tags=None, + start_time=None, 
ignore_active_span=False): return self.tracer.start_span(operation_name, child_of, - references, tags, start_time, ignore_active_span) + references, tags, start_time, ignore_active_span) diff --git a/tracing/factory.py b/tracing/factory.py index fd06fe3cac..648dfa291e 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -4,7 +4,7 @@ from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor from tracing import (Tracer, - empty_server_interceptor_decorator) + empty_server_interceptor_decorator) logger = logging.getLogger(__name__) @@ -17,14 +17,14 @@ class TracerFactory: if tracer_type.lower() == 'jaeger': config = Config(config=tracer_config.TRACING_CONFIG, - service_name=tracer_config.TRACING_SERVICE_NAME, - validate=tracer_config.TRACING_VALIDATE - ) + service_name=tracer_config.TRACING_SERVICE_NAME, + validate=tracer_config.TRACING_VALIDATE + ) tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor(tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) diff --git a/utils/__init__.py b/utils/__init__.py index ec7f32bcbc..c1d55e76c0 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,5 +1,6 @@ from functools import wraps + def singleton(cls): instances = {} @wraps(cls) diff --git a/utils/logger_helper.py b/utils/logger_helper.py index 1b59aa40ec..55ce3206ab 100644 --- a/utils/logger_helper.py +++ b/utils/logger_helper.py @@ -9,18 +9,22 @@ class InfoFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.INFO + class DebugFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.DEBUG + class WarnFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.WARN + class ErrorFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.ERROR + class CriticalFilter(logging.Filter): def filter(self, rec): return rec.levelno == logging.CRITICAL @@ -36,6 +40,7 @@ COLORS = { 'ENDC': '\033[0m', } + class ColorFulFormatColMixin: def format_col(self, message_str, level_name): if level_name in COLORS.keys(): @@ -43,12 +48,14 @@ class ColorFulFormatColMixin: 'ENDC') return message_str + class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): def format(self, record): message_str = super(ColorfulFormatter, self).format(record) return self.format_col(message_str, level_name=record.levelname) + def config(log_level, log_path, name, tz='UTC'): def build_log_file(level, log_path, name, tz): utc_now = datetime.datetime.utcnow() @@ -56,7 +63,7 @@ def config(log_level, log_path, name, tz='UTC'): local_tz = timezone(tz) tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), - level) + level) if not os.path.exists(log_path): os.makedirs(log_path) @@ -66,10 +73,10 @@ def config(log_level, log_path, name, tz='UTC'): 'disable_existing_loggers': False, 'formatters': { 'default': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' + 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' }, 'colorful_console': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)', + 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s 
(%(filename)s:%(lineno)s)', '()': ColorfulFormatter, }, }, @@ -133,8 +140,8 @@ def config(log_level, log_path, name, tz='UTC'): }, 'loggers': { '': { - 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', \ - 'milvus_error_file', 'milvus_critical_file'], + 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', + 'milvus_error_file', 'milvus_critical_file'], 'level': log_level, 'propagate': False }, From 4455f539fab8fbf0343b7678a1b1182ac7afb2a3 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 13:54:37 +0800 Subject: [PATCH 056/196] code refactor for unused import --- mishards/connections.py | 2 -- mishards/models.py | 2 +- mishards/server.py | 2 -- mishards/service_handler.py | 1 - sd/kubernetes_provider.py | 5 ++--- 5 files changed, 3 insertions(+), 9 deletions(-) diff --git a/mishards/connections.py b/mishards/connections.py index ccd8e7e81b..22263e9e7e 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -1,8 +1,6 @@ import logging import threading -import socket from functools import wraps -from contextlib import contextmanager from milvus import Milvus from mishards import (settings, exceptions) diff --git a/mishards/models.py b/mishards/models.py index 54cf5f8ed9..4b6c8f9ef4 100644 --- a/mishards/models.py +++ b/mishards/models.py @@ -1,6 +1,6 @@ import logging from sqlalchemy import (Integer, Boolean, Text, - String, BigInteger, func, and_, or_, + String, BigInteger, and_, or_, Column) from sqlalchemy.orm import relationship, backref diff --git a/mishards/server.py b/mishards/server.py index 032d101cba..feb2176e86 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -7,8 +7,6 @@ from urllib.parse import urlparse from functools import wraps from concurrent import futures from grpc._cython import cygrpc -from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable -from jaeger_client import Config from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server from mishards.grpc_utils import is_grpc_method from mishards.service_handler import ServiceHandler diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 2a1e0eef02..9d851ecfcb 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -1,7 +1,6 @@ import logging import time import datetime -from contextlib import contextmanager from collections import defaultdict from sqlalchemy import and_ diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 924f1fc8a4..8ee1588ec4 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -9,7 +9,6 @@ import time import copy import threading import queue -from functools import wraps from kubernetes import client, config, watch from utils import singleton @@ -17,7 +16,7 @@ from sd import ProviderManager logger = logging.getLogger(__name__) -incluster_namespace_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' +INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' class K8SMixin: @@ -27,7 +26,7 @@ class K8SMixin: self.kwargs = kwargs self.v1 = kwargs.get('v1', None) if not self.namespace: - self.namespace = open(incluster_namespace_path).read() + self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() if not self.v1: config.load_incluster_config() if self.in_cluster else config.load_kube_config() From 7ccab1640f78ceb1555cc3633d5d6d140f693f7f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:04:37 +0800 Subject: 
[PATCH 057/196] update pymilvus version --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e94f8d1597..ea338d0723 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ py==1.8.0 pyasn1==0.4.7 pyasn1-modules==0.2.6 pylint==2.3.1 -pymilvus-test==0.2.15 +pymilvus-test==0.2.21 #pymilvus==0.2.0 pyparsing==2.4.0 pytest==4.6.3 From f32d269eed453aa8dab638fc05c6d2f051fa7bd4 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:04:55 +0800 Subject: [PATCH 058/196] update for docker-compose --- start_services.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/start_services.yml b/start_services.yml index 5c779c5b82..b2d4d97cb6 100644 --- a/start_services.yml +++ b/start_services.yml @@ -3,7 +3,7 @@ services: milvus: runtime: nvidia restart: always - image: registry.zilliz.com/milvus/engine:branch-0.4.0-release-c58ca6 + image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de # ports: # - "0.0.0.0:19530:19530" volumes: @@ -21,13 +21,13 @@ services: mishards: restart: always - image: registry.zilliz.com/milvus/mishards:v0.0.2 + image: registry.zilliz.com/milvus/mishards:v0.0.3 ports: - "0.0.0.0:19530:19531" - "0.0.0.0:19532:19532" volumes: - /tmp/milvus/db:/tmp/milvus/db - - /tmp/mishards_env:/source/mishards/.env + # - /tmp/mishards_env:/source/mishards/.env command: ["python", "mishards/main.py"] environment: DEBUG: 'true' From fd735cc62efbd29980839454e1113afe95633178 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:17:08 +0800 Subject: [PATCH 059/196] change read .env and read .env.example --- mishards/.env.example | 2 +- mishards/settings.py | 18 ++++++++++-------- start_services.yml | 1 + 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index 47a4549f04..bfea0a3edc 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -18,7 +18,7 @@ SD_STATIC_HOSTS=127.0.0.1 SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True -TESTING=True +TESTING=False #SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_TEST_ECHO=False diff --git a/mishards/settings.py b/mishards/settings.py index 4563538a08..1982a508e7 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -1,12 +1,15 @@ import sys import os -from dotenv import load_dotenv -load_dotenv('./mishards/.env.example') - from environs import Env env = Env() -env.read_env(override=True) + +FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False) +if FROM_EXAMPLE: + from dotenv import load_dotenv + load_dotenv('./mishards/.env.example') +else: + env.read_env() DEBUG = env.bool('DEBUG', False) @@ -34,13 +37,11 @@ if SD_PROVIDER == 'Kubernetes': in_cluster=env.bool('SD_IN_CLUSTER', False), poll_interval=env.int('SD_POLL_INTERVAL', 5), pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', '') - ) + label_selector=env.str('SD_LABEL_SELECTOR', '')) elif SD_PROVIDER == 'Static': from sd.static_provider import StaticProviderSettings SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []) - ) + hosts=env.list('SD_STATIC_HOSTS', [])) TESTING = env.bool('TESTING', False) TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') @@ -72,6 +73,7 @@ class DefaultConfig: TESTING = 
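The settings change in PATCH 059 boils down to one startup toggle: load the checked-in .env.example when FROM_EXAMPLE is set, otherwise read the developer's real .env. A runnable sketch using the same environs/dotenv calls as the patch (only the final print is added for demonstration):

    from environs import Env

    env = Env()

    if env.bool('FROM_EXAMPLE', False):
        from dotenv import load_dotenv
        load_dotenv('./mishards/.env.example')   # checked-in defaults
    else:
        env.read_env()                           # developer-local .env

    DEBUG = env.bool('DEBUG', False)
    print('DEBUG =', DEBUG)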
env.bool('TESTING', False) if TESTING: + class TestingConfig(DefaultConfig): SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') SQL_ECHO = env.bool('SQL_TEST_ECHO', False) diff --git a/start_services.yml b/start_services.yml index b2d4d97cb6..c7a3c36f51 100644 --- a/start_services.yml +++ b/start_services.yml @@ -30,6 +30,7 @@ services: # - /tmp/mishards_env:/source/mishards/.env command: ["python", "mishards/main.py"] environment: + FROM_EXAMPLE: 'true' DEBUG: 'true' SERVER_PORT: 19531 WOSERVER: tcp://milvus:19530 From 4dd19f607d4ff23276864bd1b935fe415eaaa515 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:35:29 +0800 Subject: [PATCH 060/196] update build.sh --- build.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/build.sh b/build.sh index 2b3c89bbf9..c46b6a8ea9 100755 --- a/build.sh +++ b/build.sh @@ -5,6 +5,8 @@ NORMAL=`tput sgr0` YELLOW='\033[1;33m' ENDC='\033[0m' +echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" + function build_image() { dockerfile=$1 remote_registry=$2 @@ -21,12 +23,17 @@ function build_image() { case "$1" in all) + [[ -z $MISHARDS_REGISTRY ]] && { + echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx${ENDC}" + exit 1 + } + version="" [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "registry.zilliz.com/milvus/mishards${version}" "registry.zilliz.com/milvus/mishards" + build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" ;; *) echo "Usage: [option...] {base | apps}" - echo "all, Usage: build.sh all [tagname|] => registry.zilliz.com/milvus/mishards:\${tagname}" + echo "all, Usage: build.sh all [tagname|] => ${MISHARDS_REGISTRY}:\${tagname}" ;; esac From 66fc20ee54f3040f22ee3b4a5f48d11e84c21056 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:35:29 +0800 Subject: [PATCH 061/196] update build.sh update build.sh --- build.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/build.sh b/build.sh index 2b3c89bbf9..8e142d0115 100755 --- a/build.sh +++ b/build.sh @@ -21,12 +21,17 @@ function build_image() { case "$1" in all) + [[ -z $MISHARDS_REGISTRY ]] && { + echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" + exit 1 + } + version="" [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "registry.zilliz.com/milvus/mishards${version}" "registry.zilliz.com/milvus/mishards" + build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" ;; *) echo "Usage: [option...] 
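PATCH 060/061 replace the hard-coded registry.zilliz.com path in build.sh with a mandatory MISHARDS_REGISTRY variable and fail fast when it is missing. A hypothetical Python rendering of that guard, for readers who want the logic outside the shell script (build.sh remains the real entry point):

    import os
    import sys

    registry = os.environ.get('MISHARDS_REGISTRY')
    if not registry:
        sys.exit('Error: Please set docker registry first: export MISHARDS_REGISTRY=xxxx')

    tag = sys.argv[1] if len(sys.argv) > 1 else ''
    image = registry + (':' + tag if tag else '')
    print('would build and push', image)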
{base | apps}" - echo "all, Usage: build.sh all [tagname|] => registry.zilliz.com/milvus/mishards:\${tagname}" + echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" ;; esac From 8a432bc472d903e7d783d71f84e2d61768813518 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 15:56:47 +0800 Subject: [PATCH 062/196] update k8s provider for sd --- sd/kubernetes_provider.py | 108 ++++++++++++++++++++++---------------- 1 file changed, 63 insertions(+), 45 deletions(-) diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 8ee1588ec4..9a15b2fa78 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -1,7 +1,8 @@ import os import sys if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + sys.path.append(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) import re import logging @@ -9,6 +10,7 @@ import time import copy import threading import queue +import enum from kubernetes import client, config, watch from utils import singleton @@ -19,6 +21,11 @@ logger = logging.getLogger(__name__) INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' +class EventType(enum.Enum): + PodHeartBeat = 1 + Watch = 2 + + class K8SMixin: def __init__(self, namespace, in_cluster=False, **kwargs): self.namespace = namespace @@ -29,13 +36,22 @@ class K8SMixin: self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() if not self.v1: - config.load_incluster_config() if self.in_cluster else config.load_kube_config() + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() self.v1 = client.CoreV1Api() class K8SHeartbeatHandler(threading.Thread, K8SMixin): - def __init__(self, message_queue, namespace, label_selector, in_cluster=False, **kwargs): - K8SMixin.__init__(self, namespace=namespace, in_cluster=in_cluster, **kwargs) + def __init__(self, + message_queue, + namespace, + label_selector, + in_cluster=False, + **kwargs): + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) threading.Thread.__init__(self) self.queue = message_queue self.terminate = False @@ -45,13 +61,13 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): def run(self): while not self.terminate: try: - pods = self.v1.list_namespaced_pod(namespace=self.namespace, label_selector=self.label_selector) - event_message = { - 'eType': 'PodHeartBeat', - 'events': [] - } + pods = self.v1.list_namespaced_pod( + namespace=self.namespace, + label_selector=self.label_selector) + event_message = {'eType': EventType.PodHeartBeat, 'events': []} for item in pods.items: - pod = self.v1.read_namespaced_pod(name=item.metadata.name, namespace=self.namespace) + pod = self.v1.read_namespaced_pod(name=item.metadata.name, + namespace=self.namespace) name = pod.metadata.name ip = pod.status.pod_ip phase = pod.status.phase @@ -59,13 +75,11 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): message = pod.status.message ready = True if phase == 'Running' else False - pod_event = dict( - pod=name, - ip=ip, - ready=ready, - reason=reason, - message=message - ) + pod_event = dict(pod=name, + ip=ip, + ready=ready, + reason=reason, + message=message) event_message['events'].append(pod_event) @@ -82,7 +96,10 @@ class K8SHeartbeatHandler(threading.Thread, K8SMixin): class K8SEventListener(threading.Thread, K8SMixin): def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): - K8SMixin.__init__(self, namespace=namespace, 
in_cluster=in_cluster, **kwargs) + K8SMixin.__init__(self, + namespace=namespace, + in_cluster=in_cluster, + **kwargs) threading.Thread.__init__(self) self.queue = message_queue self.terminate = False @@ -96,7 +113,8 @@ class K8SEventListener(threading.Thread, K8SMixin): def run(self): resource_version = '' w = watch.Watch() - for event in w.stream(self.v1.list_namespaced_event, namespace=self.namespace, + for event in w.stream(self.v1.list_namespaced_event, + namespace=self.namespace, field_selector='involvedObject.kind=Pod'): if self.terminate: break @@ -104,7 +122,7 @@ class K8SEventListener(threading.Thread, K8SMixin): resource_version = int(event['object'].metadata.resource_version) info = dict( - eType='WatchEvent', + eType=EventType.Watch, pod=event['object'].involved_object.name, reason=event['object'].reason, message=event['object'].message, @@ -137,7 +155,8 @@ class EventHandler(threading.Thread): while try_cnt > 0: try_cnt -= 1 try: - pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], namespace=self.namespace) + pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], + namespace=self.namespace) if not pod.status.pod_ip: time.sleep(0.5) continue @@ -147,13 +166,15 @@ class EventHandler(threading.Thread): if try_cnt <= 0 and not pod: if not event['start_up']: - logger.error('Pod {} is started but cannot read pod'.format(event['pod'])) + logger.error('Pod {} is started but cannot read pod'.format( + event['pod'])) return elif try_cnt <= 0 and not pod.status.pod_ip: logger.warn('NoPodIPFoundError') return - logger.info('Register POD {} with IP {}'.format(pod.metadata.name, pod.status.pod_ip)) + logger.info('Register POD {} with IP {}'.format( + pod.metadata.name, pod.status.pod_ip)) self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) def on_pod_killing(self, event, **kwargs): @@ -178,7 +199,7 @@ class EventHandler(threading.Thread): logger.info(self.mgr.conn_mgr.conn_names) def handle_event(self, event): - if event['eType'] == 'PodHeartBeat': + if event['eType'] == EventType.PodHeartBeat: return self.on_pod_heartbeat(event) if not event or (event['reason'] not in ('Started', 'Killing')): @@ -204,7 +225,8 @@ class EventHandler(threading.Thread): class KubernetesProviderSettings: - def __init__(self, namespace, pod_patt, label_selector, in_cluster, poll_interval, **kwargs): + def __init__(self, namespace, pod_patt, label_selector, in_cluster, + poll_interval, **kwargs): self.namespace = namespace self.pod_patt = pod_patt self.label_selector = label_selector @@ -231,16 +253,15 @@ class KubernetesProvider(object): if not self.namespace: self.namespace = open(incluster_namespace_path).read() - config.load_incluster_config() if self.in_cluster else config.load_kube_config() + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() self.v1 = client.CoreV1Api() - self.listener = K8SEventListener( - message_queue=self.queue, - namespace=self.namespace, - in_cluster=self.in_cluster, - v1=self.v1, - **kwargs - ) + self.listener = K8SEventListener(message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs) self.pod_heartbeater = K8SHeartbeatHandler( message_queue=self.queue, @@ -249,13 +270,13 @@ class KubernetesProvider(object): in_cluster=self.in_cluster, v1=self.v1, poll_interval=self.poll_interval, - **kwargs - ) + **kwargs) self.event_handler = EventHandler(mgr=self, message_queue=self.queue, namespace=self.namespace, - pod_patt=self.pod_patt, **kwargs) + pod_patt=self.pod_patt, + **kwargs) def 
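The heartbeat handler above publishes one dict per poll onto the shared queue, now keyed by the EventType enum instead of a bare string. A stand-in consumer showing that message shape (the pod name and IP are invented):

    import enum
    import queue

    class EventType(enum.Enum):
        PodHeartBeat = 1
        Watch = 2

    events = queue.Queue()
    events.put({'eType': EventType.PodHeartBeat,
                'events': [{'pod': 'milvus-ro-0', 'ip': '10.0.0.7', 'ready': True,
                            'reason': None, 'message': None}]})

    msg = events.get()
    if msg['eType'] == EventType.PodHeartBeat:
        print('ready pods:', [e['ip'] for e in msg['events'] if e['ready']])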
add_pod(self, name, ip): self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) @@ -292,17 +313,14 @@ if __name__ == '__main__': connect_mgr = Connect() - settings = KubernetesProviderSettings( - namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) + settings = KubernetesProviderSettings(namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) provider_class = ProviderManager.get_provider('Kubernetes') - t = provider_class(conn_mgr=connect_mgr, - settings=settings - ) + t = provider_class(conn_mgr=connect_mgr, settings=settings) t.start() cnt = 100 while cnt > 0: From c4f7b7c4b2d206f0051cf79ac193ffa3500f7b58 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 16:11:40 +0800 Subject: [PATCH 063/196] update docker and git ignore --- .dockerignore | 2 ++ .gitignore | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.dockerignore b/.dockerignore index d1012a3afd..7f608f71d6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,7 @@ .git .gitignore .env +.coverage +cov_html/ mishards/.env diff --git a/.gitignore b/.gitignore index 624eb4fa58..8919efeb01 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ .env +.coverage +cov_html/ __pycache__/ From 8ad5d6c2d95a06df5e39200d6e7c9419789ecc2e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 14 Oct 2019 17:05:11 +0800 Subject: [PATCH 064/196] add test_grpc --- mishards/grpc_utils/test_grpc.py | 77 ++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 mishards/grpc_utils/test_grpc.py diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py new file mode 100644 index 0000000000..068ee391e7 --- /dev/null +++ b/mishards/grpc_utils/test_grpc.py @@ -0,0 +1,77 @@ +import logging +import opentracing +from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method +from milvus.grpc_gen import status_pb2, milvus_pb2 + + +logger = logging.getLogger(__name__) + + +class TestTracer(opentracing.Tracer): + pass + +class TestSpan(opentracing.Span): + def __init__(self, context, tracer, **kwargs): + super(TestSpan, self).__init__(tracer, context) + self.reset() + + def set_tag(self, key, value): + self.tags.append({key:value}) + + def log_kv(self, key_values, timestamp=None): + self.logs.append(key_values) + + def reset(self): + self.tags = [] + self.logs = [] + + +class TestRpcInfo: + def __init__(self, request, response): + self.request = request + self.response = response + + +class TestGrpcUtils: + def test_span_deco(self): + request = 'request' + OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') + response = OK + rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = milvus_pb2.BoolReply(status=OK, bool_reply=False) + rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + response = 1 + rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + logger.error(span.logs) + assert len(span.logs) == 1 + assert len(span.tags) == 1 + + response = 0 + 
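The test_grpc.py suite added here records set_tag/log_kv calls by subclassing the opentracing base classes. For contrast, the stock classes are safe no-ops, which is what makes that subclassing trick cheap; a tiny demo against the real opentracing API (the span name is arbitrary):

    import opentracing

    tracer = opentracing.Tracer()   # the library's built-in no-op tracer
    span = tracer.start_span('demo')
    span.set_tag('error', True)     # ignored by the no-op span
    span.log_kv({'event': 'demo'})  # likewise ignored
    span.finish()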
rpc_info = TestRpcInfo(request=request, response=response) + span = TestSpan(context=None, tracer=TestTracer()) + span_deco = GrpcSpanDecorator() + span_deco(span, rpc_info) + logger.error(span.logs) + assert len(span.logs) == 0 + assert len(span.tags) == 0 + + def test_is_grpc_method(self): + target = 1 + assert not is_grpc_method(target) + target = None + assert not is_grpc_method(target) From 4aa29968a68ad16abefe29941e43c5148c99164b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 16 Oct 2019 14:19:01 +0800 Subject: [PATCH 065/196] update for TESTING changes --- conftest.py | 10 ++++++++++ mishards/__init__.py | 2 +- mishards/connections.py | 2 +- mishards/grpc_utils/test_grpc.py | 2 -- mishards/main.py | 3 +-- mishards/server.py | 2 +- mishards/settings.py | 22 ++++++++++++---------- mishards/test_connections.py | 26 +++++++++++++++++++++++++- requirements.txt | 1 + tracing/factory.py | 12 ++++++++---- 10 files changed, 60 insertions(+), 22 deletions(-) diff --git a/conftest.py b/conftest.py index d6c9f3acc7..1aba5b32cf 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,6 @@ import logging import pytest +import grpc from mishards import settings, db, create_app logger = logging.getLogger(__name__) @@ -14,3 +15,12 @@ def app(request): yield app db.drop_all() + +@pytest.fixture +def started_app(app): + app.on_pre_run() + app.start(app.port) + + yield app + + app.stop() diff --git a/mishards/__init__.py b/mishards/__init__.py index 47d8adb6e3..4bd77d8c60 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -24,7 +24,7 @@ def create_app(testing_config=None): from tracing.factory import TracerFactory from mishards.grpc_utils import GrpcSpanDecorator - tracer = TracerFactory.new_tracer(settings.TRACING_TYPE, settings.TracingConfig, + tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, span_decorator=GrpcSpanDecorator()) grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) diff --git a/mishards/connections.py b/mishards/connections.py index 22263e9e7e..7db271381c 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -18,7 +18,7 @@ class Connection: self.conn = Milvus() self.error_handlers = [] if not error_handlers else error_handlers self.on_retry_func = kwargs.get('on_retry_func', None) - self._connect() + # self._connect() def __str__(self): return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py index 068ee391e7..d8511c8d6c 100644 --- a/mishards/grpc_utils/test_grpc.py +++ b/mishards/grpc_utils/test_grpc.py @@ -57,7 +57,6 @@ class TestGrpcUtils: span = TestSpan(context=None, tracer=TestTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) - logger.error(span.logs) assert len(span.logs) == 1 assert len(span.tags) == 1 @@ -66,7 +65,6 @@ class TestGrpcUtils: span = TestSpan(context=None, tracer=TestTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) - logger.error(span.logs) assert len(span.logs) == 0 assert len(span.tags) == 0 diff --git a/mishards/main.py b/mishards/main.py index 3f69484ee4..c0d142607b 100644 --- a/mishards/main.py +++ b/mishards/main.py @@ -6,8 +6,7 @@ from mishards import (settings, create_app) def main(): - server = create_app( - settings.TestingConfig if settings.TESTING else settings.DefaultConfig) + server = create_app(settings.DefaultConfig) server.run(port=settings.SERVER_PORT) return 0 diff --git a/mishards/server.py b/mishards/server.py index 
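The new started_app fixture follows the standard pytest yield pattern: everything before the yield is setup, everything after is teardown that runs once the test body finishes. A generic, self-contained version (the App class is a stand-in for the gRPC server):

    import pytest

    class App:
        def start(self):
            self.running = True

        def stop(self):
            self.running = False

    @pytest.fixture
    def started_app():
        app = App()
        app.start()
        yield app    # the test body executes here
        app.stop()   # teardown, reached on pass or fail

    def test_running(started_app):
        assert started_app.running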
feb2176e86..dcaacd0fbc 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -39,7 +39,7 @@ class Server: self.register_pre_run_handler(self.pre_run_handler) def pre_run_handler(self): - woserver = settings.WOSERVER if not settings.TESTING else settings.TESTING_WOSERVER + woserver = settings.WOSERVER url = urlparse(woserver) ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) diff --git a/mishards/settings.py b/mishards/settings.py index 1982a508e7..c9b62717d4 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -43,10 +43,7 @@ elif SD_PROVIDER == 'Static': SD_PROVIDER_SETTINGS = StaticProviderSettings( hosts=env.list('SD_STATIC_HOSTS', [])) -TESTING = env.bool('TESTING', False) -TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') - -TRACING_TYPE = env.str('TRACING_TYPE', '') +# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') class TracingConfig: @@ -64,19 +61,24 @@ class TracingConfig: }, 'logging': env.bool('TRACING_LOGGING', True) } + DEFAULT_TRACING_CONFIG = { + 'sampler': { + 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), + 'param': env.str('TRACING_SAMPLER_PARAM', "0"), + } + } class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) + TRACING_TYPE = env.str('TRACING_TYPE', '') -TESTING = env.bool('TESTING', False) -if TESTING: - - class TestingConfig(DefaultConfig): - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') - SQL_ECHO = env.bool('SQL_TEST_ECHO', False) +class TestingConfig(DefaultConfig): + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') + SQL_ECHO = env.bool('SQL_TEST_ECHO', False) + TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') if __name__ == '__main__': diff --git a/mishards/test_connections.py b/mishards/test_connections.py index f1c54f0c61..819d2e03da 100644 --- a/mishards/test_connections.py +++ b/mishards/test_connections.py @@ -1,6 +1,8 @@ import logging import pytest +import mock +from milvus import Milvus from mishards.connections import (ConnectionMgr, Connection) from mishards import exceptions @@ -27,6 +29,12 @@ class TestConnection: mgr.register('WOSERVER', 'xxxx') assert len(mgr.conn_names) == 0 + assert not mgr.conn('XXXX', None) + with pytest.raises(exceptions.ConnectionNotFoundError): + mgr.conn('XXXX', None, True) + + mgr.conn('WOSERVER', None) + def test_connection(self): class Conn: def __init__(self, state): @@ -37,6 +45,7 @@ class TestConnection: def connected(self): return self.state + FAIL_CONN = Conn(False) PASS_CONN = Conn(True) @@ -58,7 +67,9 @@ class TestConnection: max_retry = 3 RetryObj = Retry() - c = Connection('client', uri='', + + c = Connection('client', + uri='xx', max_retry=max_retry, on_retry_func=RetryObj) c.conn = FAIL_CONN @@ -75,3 +86,16 @@ class TestConnection: this_connect() assert ff.executed assert RetryObj.times == 0 + + this_connect = c.connect(func=None) + with pytest.raises(TypeError): + this_connect() + + errors = [] + + def error_handler(err): + errors.append(err) + + this_connect = c.connect(func=None, exception_handler=error_handler) + this_connect() + assert len(errors) == 1 diff --git a/requirements.txt b/requirements.txt index ea338d0723..133cfac8ab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,3 +33,4 @@ SQLAlchemy==1.3.5 urllib3==1.25.3 jaeger-client>=3.4.0 grpcio-opentracing>=1.0 +mock==2.0.0 diff --git a/tracing/factory.py b/tracing/factory.py index 648dfa291e..0c14d9d536 100644 --- a/tracing/factory.py +++ 
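test_connections.py now pins down the contract of the connect wrapper: with an exception_handler supplied, a failing call is routed to the handler instead of propagating. Condensed to its essence with a stand-in wrapper named wrap (calling func=None raises TypeError, exactly the failure the test exploits):

    errors = []

    def wrap(func, exception_handler=None):
        def inner(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if exception_handler:
                    exception_handler(e)
                else:
                    raise
        return inner

    wrap(None, exception_handler=errors.append)()
    assert len(errors) == 1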
b/tracing/factory.py @@ -12,13 +12,17 @@ logger = logging.getLogger(__name__) class TracerFactory: @classmethod def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): + config = tracer_config.TRACING_CONFIG + service_name = tracer_config.TRACING_SERVICE_NAME + validate=tracer_config.TRACING_VALIDATE if not tracer_type: - return Tracer() + tracer_type = 'jaeger' + config = tracer_config.DEFAULT_TRACING_CONFIG if tracer_type.lower() == 'jaeger': - config = Config(config=tracer_config.TRACING_CONFIG, - service_name=tracer_config.TRACING_SERVICE_NAME, - validate=tracer_config.TRACING_VALIDATE + config = Config(config=config, + service_name=service_name, + validate=validate ) tracer = config.initialize_tracer() From 9012f47a101228f956d04cc2eae804f38ca4e50e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Wed, 16 Oct 2019 17:38:34 +0800 Subject: [PATCH 066/196] changes for unit test --- mishards/grpc_utils/test_grpc.py | 24 +++++++-------- mishards/service_handler.py | 51 ++++++++++++++++++++++++-------- tracing/factory.py | 8 +++-- 3 files changed, 56 insertions(+), 27 deletions(-) diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py index d8511c8d6c..314fccfe00 100644 --- a/mishards/grpc_utils/test_grpc.py +++ b/mishards/grpc_utils/test_grpc.py @@ -7,12 +7,12 @@ from milvus.grpc_gen import status_pb2, milvus_pb2 logger = logging.getLogger(__name__) -class TestTracer(opentracing.Tracer): +class FakeTracer(opentracing.Tracer): pass -class TestSpan(opentracing.Span): +class FakeSpan(opentracing.Span): def __init__(self, context, tracer, **kwargs): - super(TestSpan, self).__init__(tracer, context) + super(FakeSpan, self).__init__(tracer, context) self.reset() def set_tag(self, key, value): @@ -26,7 +26,7 @@ class TestSpan(opentracing.Span): self.logs = [] -class TestRpcInfo: +class FakeRpcInfo: def __init__(self, request, response): self.request = request self.response = response @@ -37,32 +37,32 @@ class TestGrpcUtils: request = 'request' OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') response = OK - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 0 assert len(span.tags) == 0 response = milvus_pb2.BoolReply(status=OK, bool_reply=False) - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 0 assert len(span.tags) == 0 response = 1 - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 1 assert len(span.tags) == 1 response = 0 - rpc_info = TestRpcInfo(request=request, response=response) - span = TestSpan(context=None, tracer=TestTracer()) + rpc_info = FakeRpcInfo(request=request, response=response) + span = FakeSpan(context=None, tracer=FakeTracer()) span_deco = GrpcSpanDecorator() span_deco(span, rpc_info) assert len(span.logs) == 0 diff --git 
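PATCH 066 renames the Test-prefixed tracing doubles to Fake-*. The likely motive, worth spelling out: pytest collects any class whose name matches Test* by default, so helper classes named TestTracer/TestSpan risk being picked up as test classes. The rename sidesteps that:

    import opentracing

    class FakeTracer(opentracing.Tracer):
        # Helper double; the Fake- prefix keeps pytest's default
        # python_classes = Test* rule from collecting it.
        pass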
a/mishards/service_handler.py b/mishards/service_handler.py index 9d851ecfcb..113ec3ca20 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -237,13 +237,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _add_vectors(self, param, metadata=None): + return self.connection(metadata=metadata).add_vectors(None, None, insert_param=param) + @mark_grpc_method def Insert(self, request, context): logger.info('Insert') # TODO: Ths SDK interface add_vectors() could update, add a key 'row_id_array' - _status, _ids = self.connection(metadata={ - 'resp_class': milvus_pb2.VectorIds - }).add_vectors(None, None, insert_param=request) + _status, _ids = self._add_vectors(metadata={ + 'resp_class': milvus_pb2.VectorIds}, param=request) return milvus_pb2.VectorIds( status=status_pb2.Status(error_code=_status.code, reason=_status.message), vector_id_array=_ids @@ -305,6 +307,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def SearchInFiles(self, request, context): raise NotImplemented() + def _describe_table(self, table_name, metadata=None): + return self.connection(metadata=metadata).describe_table(table_name) + @mark_grpc_method def DescribeTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -319,7 +324,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } logger.info('DescribeTable {}'.format(_table_name)) - _status, _table = self.connection(metadata=metadata).describe_table(_table_name) + _status, _table = self._describe_table(metadata=metadata, table_name=_table_name) if _status.OK(): return milvus_pb2.TableSchema( @@ -335,6 +340,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): status=status_pb2.Status(error_code=_status.code, reason=_status.message), ) + def _count_table(self, table_name, metadata=None): + return self.connection(metadata=metadata).get_table_row_count(table_name) + @mark_grpc_method def CountTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -351,12 +359,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): metadata = { 'resp_class': milvus_pb2.TableRowCount } - _status, _count = self.connection(metadata=metadata).get_table_row_count(_table_name) + _status, _count = self._count_table(_table_name, metadata=metadata) return milvus_pb2.TableRowCount( status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) + + def _get_server_version(self, metadata=None): + return self.connection(metadata=metadata).server_version() + @mark_grpc_method def Cmd(self, request, context): _status, _cmd = Parser.parse_proto_Command(request) @@ -364,7 +376,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if not _status.OK(): return milvus_pb2.StringReply( - status_pb2.Status(error_code=_status.code, reason=_status.message) + status=status_pb2.Status(error_code=_status.code, reason=_status.message) ) metadata = { @@ -372,7 +384,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } if _cmd == 'version': - _status, _reply = self.connection(metadata=metadata).server_version() + _status, _reply = self._get_server_version(metadata=metadata) else: _status, _reply = self.connection(metadata=metadata).server_status() @@ -381,19 +393,25 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply ) + def _show_tables(self): + 
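The service_handler refactor in this patch extracts every backend call (_add_vectors, _describe_table, _count_table, ...) into a one-line private method. The payoff is testability: a MagicMock can replace just that method, as test_server.py does below. A toy illustration with hypothetical names (requirements.txt pins mock==2.0.0 for this):

    import mock

    class Handler:
        def _count_table(self, table_name):
            raise RuntimeError('would need a live backend')

        def CountTable(self, table_name):
            status, count = self._count_table(table_name)
            return count if isinstance(count, int) else -1

    Handler._count_table = mock.MagicMock(return_value=('OK', 42))
    assert Handler().CountTable('demo') == 42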
return self.connection(metadata=metadata).show_tables() + @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') metadata = { 'resp_class': milvus_pb2.TableName } - _status, _results = self.connection(metadata=metadata).show_tables() + _status, _results = self._show_tables() return milvus_pb2.TableNameList( status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_names=_results ) + def _delete_by_range(self, table_name, start_date, end_date): + return self.connection().delete_vectors_by_range(table_name, start_date, end_date) + @mark_grpc_method def DeleteByRange(self, request, context): _status, unpacks = \ @@ -405,9 +423,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _table_name, _start_date, _end_date = unpacks logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) - _status = self.connection().delete_vectors_by_range(_table_name, _start_date, _end_date) + _status = self._delete_by_range(_table_name, _start_date, _end_date) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _preload_table(self, table_name): + return self.connection().preload_table(table_name) + @mark_grpc_method def PreloadTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -416,9 +437,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('PreloadTable {}'.format(_table_name)) - _status = self.connection().preload_table(_table_name) + _status = self._preload_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _describe_index(self, table_name, metadata=None): + return self.connection(metadata=metadata).describe_index(table_name) + @mark_grpc_method def DescribeIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -433,13 +457,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): } logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self.connection(metadata=metadata).describe_index(_table_name) + _status, _index_param = self._describe_index(table_name=_table_name, metadata=metadata) _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_name=_table_name, index=_index) + def _drop_index(self, table_name): + return self.connection().drop_index(table_name) + @mark_grpc_method def DropIndex(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -448,5 +475,5 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) logger.info('DropIndex {}'.format(_table_name)) - _status = self.connection().drop_index(_table_name) + _status = self._drop_index(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) diff --git a/tracing/factory.py b/tracing/factory.py index 0c14d9d536..61cd75fcd6 100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -12,12 +12,14 @@ logger = logging.getLogger(__name__) class TracerFactory: @classmethod def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): + if not tracer_type: + return Tracer() config = tracer_config.TRACING_CONFIG service_name = tracer_config.TRACING_SERVICE_NAME 
validate=tracer_config.TRACING_VALIDATE - if not tracer_type: - tracer_type = 'jaeger' - config = tracer_config.DEFAULT_TRACING_CONFIG + # if not tracer_type: + # tracer_type = 'jaeger' + # config = tracer_config.DEFAULT_TRACING_CONFIG if tracer_type.lower() == 'jaeger': config = Config(config=config, From e0498e081df88eecb646c9d86cf744412f908902 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 17 Oct 2019 14:13:50 +0800 Subject: [PATCH 067/196] update for server test update for server test --- mishards/factories.py | 5 +- mishards/service_handler.py | 37 +++-- mishards/test_server.py | 279 ++++++++++++++++++++++++++++++++++++ tracing/__init__.py | 13 ++ 4 files changed, 320 insertions(+), 14 deletions(-) create mode 100644 mishards/test_server.py diff --git a/mishards/factories.py b/mishards/factories.py index c4037fe2d7..52c0253b39 100644 --- a/mishards/factories.py +++ b/mishards/factories.py @@ -6,6 +6,7 @@ from factory.alchemy import SQLAlchemyModelFactory from faker import Faker from faker.providers import BaseProvider +from milvus.client.types import MetricType from mishards import db from mishards.models import Tables, TableFiles @@ -27,12 +28,12 @@ class TablesFactory(SQLAlchemyModelFactory): id = factory.Faker('random_number', digits=16, fix_len=True) table_id = factory.Faker('uuid4') - state = factory.Faker('random_element', elements=(0, 1, 2, 3)) + state = factory.Faker('random_element', elements=(0, 1)) dimension = factory.Faker('random_element', elements=(256, 512)) created_on = int(time.time()) index_file_size = 0 engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) - metric_type = factory.Faker('random_element', elements=(0, 1)) + metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP)) nlist = 16384 diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 113ec3ca20..e04965c12a 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -125,8 +125,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None routing = {} + p_span = None if self.tracer.empty else context.get_active_span().context with self.tracer.start_span('get_routing', - child_of=context.get_active_span().context): + child_of=p_span): routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) logger.info('Routing: {}'.format(routing)) @@ -145,9 +146,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn = self.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) - span = span if span else context.get_active_span().context + span = span if span else (None if self.tracer.empty else context.get_active_span().context) + with self.tracer.start_span('search_{}'.format(addr), - child_of=context.get_active_span().context): + child_of=span): ret = conn.search_vectors_in_files(table_name=query_params['table_id'], file_ids=query_params['file_ids'], query_records=vectors, @@ -160,7 +162,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): all_topk_results.append(ret) with self.tracer.start_span('do_search', - child_of=context.get_active_span().context) as span: + child_of=p_span) as span: with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) @@ -171,9 +173,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reverse = 
table_meta.metric_type == Types.MetricType.IP with self.tracer.start_span('do_merge', - child_of=context.get_active_span().context): + child_of=p_span): return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + def _create_table(self, table_schema): + return self.connection().create_table(table_schema) + @mark_grpc_method def CreateTable(self, request, context): _status, _table_schema = Parser.parse_proto_TableSchema(request) @@ -183,10 +188,13 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateTable {}'.format(_table_schema['table_name'])) - _status = self.connection().create_table(_table_schema) + _status = self._create_table(_table_schema) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _has_table(self, table_name, metadata=None): + return self.connection(metadata=metadata).has_table(table_name) + @mark_grpc_method def HasTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -199,15 +207,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) - _bool = self.connection(metadata={ - 'resp_class': milvus_pb2.BoolReply - }).has_table(_table_name) + _bool = self._has_table(_table_name, metadata={ + 'resp_class': milvus_pb2.BoolReply}) return milvus_pb2.BoolReply( status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), bool_reply=_bool ) + def _delete_table(self, table_name): + return self.connection().delete_table(table_name) + @mark_grpc_method def DropTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) @@ -217,10 +227,13 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('DropTable {}'.format(_table_name)) - _status = self.connection().delete_table(_table_name) + _status = self._delete_table(_table_name) return status_pb2.Status(error_code=_status.code, reason=_status.message) + def _create_index(self, table_name, index): + return self.connection().create_index(table_name, index) + @mark_grpc_method def CreateIndex(self, request, context): _status, unpacks = Parser.parse_proto_IndexParam(request) @@ -233,7 +246,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('CreateIndex {}'.format(_table_name)) # TODO: interface create_table incompleted - _status = self.connection().create_index(_table_name, _index) + _status = self._create_index(_table_name, _index) return status_pb2.Status(error_code=_status.code, reason=_status.message) @@ -298,7 +311,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list = milvus_pb2.TopKQueryResultList( - status=status, + status=status_pb2.Status(error_code=status.error_code, reason=status.reason), topk_query_result=results ) return topk_result_list diff --git a/mishards/test_server.py b/mishards/test_server.py new file mode 100644 index 0000000000..e9a7c0d878 --- /dev/null +++ b/mishards/test_server.py @@ -0,0 +1,279 @@ +import logging +import pytest +import mock +import datetime +import random +import faker +import inspect +from milvus import Milvus +from milvus.client.types import Status, IndexType, MetricType +from milvus.client.Abstract import IndexParam, TableSchema +from milvus.grpc_gen import status_pb2, milvus_pb2 +from mishards import db, create_app, settings +from mishards.service_handler import ServiceHandler +from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as 
Parser +from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables + +logger = logging.getLogger(__name__) + +OK = Status(code=Status.SUCCESS, message='Success') +BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') + + +@pytest.mark.usefixtures('started_app') +class TestServer: + def client(self, port): + m = Milvus() + m.connect(host='localhost', port=port) + return m + + def test_server_start(self, started_app): + assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER + + def test_cmd(self, started_app): + ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, + '')) + status, _ = self.client(started_app.port).server_version() + assert status.OK() + + Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) + status, _ = self.client(started_app.port).server_version() + assert not status.OK() + + def test_drop_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + ServiceHandler._drop_index = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).drop_index(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client(started_app.port).drop_index(table_name) + assert not status.OK() + + def test_describe_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + index_type = IndexType.FLAT + nlist = 1 + index_param = IndexParam(table_name=table_name, + index_type=index_type, + nlist=nlist) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._describe_index = mock.MagicMock( + return_value=(OK, index_param)) + status, ret = self.client(started_app.port).describe_index(table_name) + assert status.OK() + assert ret._table_name == index_param._table_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client(started_app.port).describe_index(table_name) + assert not status.OK() + + def test_preload(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._preload_table = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).preload_table(table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client(started_app.port).preload_table(table_name) + assert not status.OK() + + def test_delete_by_range(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + unpacked = table_name, datetime.datetime.today( + ), datetime.datetime.today() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(OK, unpacked)) + ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).delete_vectors_by_range( + *unpacked) + assert status.OK() + + Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( + return_value=(BAD, unpacked)) + status = self.client(started_app.port).delete_vectors_by_range( + *unpacked) + assert not status.OK() + + def test_count_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + count = random.randint(100, 200) + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) + status, ret = self.client( + started_app.port).get_table_row_count(table_name) + 
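One idiom recurs through test_server.py: inspect.currentframe().f_code.co_name turns the enclosing test function's name into the table name, so every test touches a distinct, self-describing table. The trick in isolation:

    import inspect

    def test_show_name():
        return inspect.currentframe().f_code.co_name

    assert test_show_name() == 'test_show_name'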
assert status.OK() + assert ret == count + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status, _ = self.client( + started_app.port).get_table_row_count(table_name) + assert not status.OK() + + def test_show_tables(self, started_app): + tables = ['t1', 't2'] + ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) + status, ret = self.client(started_app.port).show_tables() + assert status.OK() + assert ret == tables + + def test_describe_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + nlist = 1 + table_schema = TableSchema(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_schema.table_name)) + ServiceHandler._describe_table = mock.MagicMock( + return_value=(OK, table_schema)) + status, _ = self.client(started_app.port).describe_table(table_name) + assert status.OK() + + ServiceHandler._describe_table = mock.MagicMock( + return_value=(BAD, table_schema)) + status, _ = self.client(started_app.port).describe_table(table_name) + assert not status.OK() + + Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, + 'cmd')) + status, ret = self.client(started_app.port).describe_table(table_name) + assert not status.OK() + + def test_insert(self, started_app): + table_name = inspect.currentframe().f_code.co_name + vectors = [[random.random() for _ in range(16)] for _ in range(10)] + ids = [random.randint(1000000, 20000000) for _ in range(10)] + ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) + status, ret = self.client(started_app.port).add_vectors( + table_name=table_name, records=vectors) + assert status.OK() + assert ids == ret + + def test_create_index(self, started_app): + table_name = inspect.currentframe().f_code.co_name + unpacks = table_name, None + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, + unpacks)) + ServiceHandler._create_index = mock.MagicMock(return_value=OK) + status = self.client( + started_app.port).create_index(table_name=table_name) + assert status.OK() + + Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, + None)) + status = self.client( + started_app.port).create_index(table_name=table_name) + assert not status.OK() + + def test_drop_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._delete_table = mock.MagicMock(return_value=OK) + status = self.client( + started_app.port).delete_table(table_name=table_name) + assert status.OK() + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + status = self.client( + started_app.port).delete_table(table_name=table_name) + assert not status.OK() + + def test_has_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(OK, table_name)) + ServiceHandler._has_table = mock.MagicMock(return_value=True) + has = self.client(started_app.port).has_table(table_name=table_name) + assert has + + Parser.parse_proto_TableName = mock.MagicMock( + return_value=(BAD, table_name)) + has = self.client(started_app.port).has_table(table_name=table_name) + assert not has + + def test_create_table(self, started_app): + table_name = inspect.currentframe().f_code.co_name + dimension = 128 + table_schema = 
dict(table_name=table_name, + index_file_size=100, + metric_type=MetricType.L2, + dimension=dimension) + + ServiceHandler._create_table = mock.MagicMock(return_value=OK) + status = self.client(started_app.port).create_table(table_schema) + assert status.OK() + + Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, + None)) + status = self.client(started_app.port).create_table(table_schema) + assert not status.OK() + + def random_data(self, n, dimension): + return [[random.random() for _ in range(dimension)] for _ in range(n)] + + def test_search(self, started_app): + table_name = inspect.currentframe().f_code.co_name + to_index_cnt = random.randint(10, 20) + table = TablesFactory(table_id=table_name, state=Tables.NORMAL) + to_index_files = TableFilesFactory.create_batch( + to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX) + topk = random.randint(5, 10) + nq = random.randint(5, 10) + param = { + 'table_name': table_name, + 'query_records': self.random_data(nq, table.dimension), + 'top_k': topk, + 'nprobe': 2049 + } + + result = [ + milvus_pb2.TopKQueryResult(query_result_arrays=[ + milvus_pb2.QueryResult(id=i, distance=random.random()) + for i in range(topk) + ]) for i in range(nq) + ] + + mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( + error_code=status_pb2.SUCCESS, reason="Success"), + topk_query_result=result) + + table_schema = TableSchema(table_name=table_name, + index_file_size=table.index_file_size, + metric_type=table.metric_type, + dimension=table.dimension) + + status, _ = self.client(started_app.port).search_vectors(**param) + assert status.code == Status.ILLEGAL_ARGUMENT + + param['nprobe'] = 2048 + Milvus.describe_table = mock.MagicMock(return_value=(BAD, + table_schema)) + status, ret = self.client(started_app.port).search_vectors(**param) + assert status.code == Status.TABLE_NOT_EXISTS + + Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) + Milvus.search_vectors_in_files = mock.MagicMock( + return_value=mock_results) + + status, ret = self.client(started_app.port).search_vectors(**param) + assert status.OK() + assert len(ret) == nq diff --git a/tracing/__init__.py b/tracing/__init__.py index 5014309a52..a1974e2204 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,6 +1,13 @@ +from contextlib import contextmanager + def empty_server_interceptor_decorator(target_server, interceptor): return target_server +@contextmanager +def EmptySpan(*args, **kwargs): + yield None + return + class Tracer: def __init__(self, tracer=None, @@ -13,11 +20,17 @@ class Tracer: def decorate(self, server): return self.server_decorator(server, self.interceptor) + @property + def empty(self): + return self.tracer is None + def close(self): self.tracer and self.tracer.close() def start_span(self, operation_name=None, child_of=None, references=None, tags=None, start_time=None, ignore_active_span=False): + if self.empty: + return EmptySpan() return self.tracer.start_span(operation_name, child_of, references, tags, start_time, ignore_active_span) From 24b2e73e5ae132f0e2f0a391895b3031165098e7 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 17 Oct 2019 14:20:09 +0800 Subject: [PATCH 068/196] code style format --- conftest.py | 1 + mishards/grpc_utils/test_grpc.py | 4 ++-- mishards/service_handler.py | 3 +-- mishards/test_server.py | 2 +- tracing/__init__.py | 19 +++++++++++++------ tracing/factory.py | 21 ++++++++++++--------- 6 files changed, 30 insertions(+), 20 deletions(-) diff --git a/conftest.py 
b/conftest.py index 1aba5b32cf..ebe8276cea 100644 --- a/conftest.py +++ b/conftest.py @@ -16,6 +16,7 @@ def app(request): db.drop_all() + @pytest.fixture def started_app(app): app.on_pre_run() diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py index 314fccfe00..9af09e5d0d 100644 --- a/mishards/grpc_utils/test_grpc.py +++ b/mishards/grpc_utils/test_grpc.py @@ -3,20 +3,20 @@ import opentracing from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method from milvus.grpc_gen import status_pb2, milvus_pb2 - logger = logging.getLogger(__name__) class FakeTracer(opentracing.Tracer): pass + class FakeSpan(opentracing.Span): def __init__(self, context, tracer, **kwargs): super(FakeSpan, self).__init__(tracer, context) self.reset() def set_tag(self, key, value): - self.tags.append({key:value}) + self.tags.append({key: value}) def log_kv(self, key_values, timestamp=None): self.logs.append(key_values) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index e04965c12a..0172f73126 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -232,7 +232,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status_pb2.Status(error_code=_status.code, reason=_status.message) def _create_index(self, table_name, index): - return self.connection().create_index(table_name, index) + return self.connection().create_index(table_name, index) @mark_grpc_method def CreateIndex(self, request, context): @@ -378,7 +378,6 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): status=status_pb2.Status(error_code=_status.code, reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) - def _get_server_version(self, metadata=None): return self.connection(metadata=metadata).server_version() diff --git a/mishards/test_server.py b/mishards/test_server.py index e9a7c0d878..a2677847da 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -254,7 +254,7 @@ class TestServer: mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( error_code=status_pb2.SUCCESS, reason="Success"), - topk_query_result=result) + topk_query_result=result) table_schema = TableSchema(table_name=table_name, index_file_size=table.index_file_size, diff --git a/tracing/__init__.py b/tracing/__init__.py index a1974e2204..64a5b50d15 100644 --- a/tracing/__init__.py +++ b/tracing/__init__.py @@ -1,8 +1,10 @@ from contextlib import contextmanager + def empty_server_interceptor_decorator(target_server, interceptor): return target_server + @contextmanager def EmptySpan(*args, **kwargs): yield None @@ -10,7 +12,8 @@ def EmptySpan(*args, **kwargs): class Tracer: - def __init__(self, tracer=None, + def __init__(self, + tracer=None, interceptor=None, server_decorator=empty_server_interceptor_decorator): self.tracer = tracer @@ -27,10 +30,14 @@ class Tracer: def close(self): self.tracer and self.tracer.close() - def start_span(self, operation_name=None, - child_of=None, references=None, tags=None, - start_time=None, ignore_active_span=False): + def start_span(self, + operation_name=None, + child_of=None, + references=None, + tags=None, + start_time=None, + ignore_active_span=False): if self.empty: return EmptySpan() - return self.tracer.start_span(operation_name, child_of, - references, tags, start_time, ignore_active_span) + return self.tracer.start_span(operation_name, child_of, references, + tags, start_time, ignore_active_span) diff --git a/tracing/factory.py b/tracing/factory.py index 61cd75fcd6..14fcde2eb3 
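tracing/__init__.py now lets callers write one code path whether tracing is configured or not: an unset tracer yields EmptySpan, a context manager producing None. A condensed replay of that pattern with a trivial caller:

    from contextlib import contextmanager

    @contextmanager
    def EmptySpan(*args, **kwargs):
        yield None   # still a context manager, just no real span

    class Tracer:
        def __init__(self, tracer=None):
            self.tracer = tracer

        @property
        def empty(self):
            return self.tracer is None

        def start_span(self, operation_name=None, **kwargs):
            if self.empty:
                return EmptySpan()
            return self.tracer.start_span(operation_name, **kwargs)

    with Tracer().start_span('noop') as span:
        assert span is None   # traced and untraced call sites stay identical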
100644 --- a/tracing/factory.py +++ b/tracing/factory.py @@ -3,20 +3,23 @@ from jaeger_client import Config from grpc_opentracing.grpcext import intercept_server from grpc_opentracing import open_tracing_server_interceptor -from tracing import (Tracer, - empty_server_interceptor_decorator) +from tracing import (Tracer, empty_server_interceptor_decorator) logger = logging.getLogger(__name__) class TracerFactory: @classmethod - def new_tracer(cls, tracer_type, tracer_config, span_decorator=None, **kwargs): + def new_tracer(cls, + tracer_type, + tracer_config, + span_decorator=None, + **kwargs): if not tracer_type: return Tracer() config = tracer_config.TRACING_CONFIG service_name = tracer_config.TRACING_SERVICE_NAME - validate=tracer_config.TRACING_VALIDATE + validate = tracer_config.TRACING_VALIDATE # if not tracer_type: # tracer_type = 'jaeger' # config = tracer_config.DEFAULT_TRACING_CONFIG @@ -24,13 +27,13 @@ class TracerFactory: if tracer_type.lower() == 'jaeger': config = Config(config=config, service_name=service_name, - validate=validate - ) + validate=validate) tracer = config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor(tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) + tracer_interceptor = open_tracing_server_interceptor( + tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) From 560c4310ae15a8326ca90e1df153e89fc4befb6b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 10:19:39 +0800 Subject: [PATCH 069/196] small refactor --- mishards/service_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 0172f73126..1396466568 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -405,7 +405,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply ) - def _show_tables(self): + def _show_tables(self, metadata=None): return self.connection(metadata=metadata).show_tables() @mark_grpc_method @@ -414,7 +414,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): metadata = { 'resp_class': milvus_pb2.TableName } - _status, _results = self._show_tables() + _status, _results = self._show_tables(metadata=metadata) return milvus_pb2.TableNameList( status=status_pb2.Status(error_code=_status.code, reason=_status.message), From a3409be0dc4330923dd5bab2d647d1f11dc3d538 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:38:19 +0800 Subject: [PATCH 070/196] add router in impl --- mishards/__init__.py | 5 +- mishards/routings.py | 81 +++++++++ mishards/server.py | 27 ++- mishards/service_handler.py | 331 +++++++++++++++++------------------- mishards/settings.py | 2 + mishards/utilities.py | 20 +++ 6 files changed, 287 insertions(+), 179 deletions(-) create mode 100644 mishards/routings.py create mode 100644 mishards/utilities.py diff --git a/mishards/__init__.py b/mishards/__init__.py index 4bd77d8c60..759e8c2e5a 100644 --- a/mishards/__init__.py +++ b/mishards/__init__.py @@ -27,7 +27,10 @@ def create_app(testing_config=None): tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, span_decorator=GrpcSpanDecorator()) - grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, discover=discover) + from mishards.routings import RouterFactory + router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) + + 
grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) from mishards import exception_handlers diff --git a/mishards/routings.py b/mishards/routings.py new file mode 100644 index 0000000000..a61352f40b --- /dev/null +++ b/mishards/routings.py @@ -0,0 +1,81 @@ +import logging +from sqlalchemy import exc as sqlalchemy_exc +from sqlalchemy import and_ + +from mishards import exceptions, db +from mishards.hash_ring import HashRing +from mishards.models import Tables + +logger = logging.getLogger(__name__) + + +class RouteManager: + ROUTER_CLASSES = {} + + @classmethod + def register_router_class(cls, target): + name = target.__dict__.get('NAME', None) + name = name if name else target.__name__ + cls.ROUTER_CLASSES[name] = target + return target + + @classmethod + def get_router_class(cls, name): + return cls.ROUTER_CLASSES.get(name, None) + + +class RouterFactory: + @classmethod + def new_router(cls, name, conn_mgr, **kwargs): + router_class = RouteManager.get_router_class(name) + assert router_class + return router_class(conn_mgr, **kwargs) + + +class RouterMixin: + def __init__(self, conn_mgr): + self.conn_mgr = conn_mgr + + def routing(self, table_name, metadata=None, **kwargs): + raise NotImplementedError() + + +@RouteManager.register_router_class +class FileBasedHashRingRouter(RouterMixin): + NAME = 'FileBasedHashRingRouter' + + def __init__(self, conn_mgr, **kwargs): + super(FileBasedHashRingRouter, self).__init__(conn_mgr) + + def routing(self, table_name, metadata=None, **kwargs): + range_array = kwargs.pop('range_array', None) + return self._route(table_name, range_array, metadata, **kwargs) + + def _route(self, table_name, range_array, metadata=None, **kwargs): + # PXU TODO: Implement Thread-local Context + try: + table = db.Session.query(Tables).filter( + and_(Tables.table_id == table_name, + Tables.state != Tables.TO_DELETE)).first() + except sqlalchemy_exc.SQLAlchemyError as e: + raise exceptions.DBError(message=str(e), metadata=metadata) + + if not table: + raise exceptions.TableNotFoundError(table_name, metadata=metadata) + files = table.files_to_search(range_array) + + servers = self.conn_mgr.conn_names + logger.info('Available servers: {}'.format(servers)) + + ring = HashRing(servers) + + routing = {} + + for f in files: + target_host = ring.get_node(str(f.id)) + sub = routing.get(target_host, None) + if not sub: + routing[target_host] = {'table_id': table_name, 'file_ids': []} + routing[target_host]['file_ids'].append(str(f.id)) + + return routing diff --git a/mishards/server.py b/mishards/server.py index dcaacd0fbc..20be8f1746 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -22,17 +22,24 @@ class Server: self.error_handlers = {} self.exit_flag = False - def init_app(self, conn_mgr, tracer, discover, port=19530, max_workers=10, **kwargs): + def init_app(self, + conn_mgr, + tracer, + router, + discover, + port=19530, + max_workers=10, + **kwargs): self.port = int(port) self.conn_mgr = conn_mgr self.tracer = tracer + self.router = router self.discover = discover self.server_impl = grpc.server( thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), - (cygrpc.ChannelArgKey.max_receive_message_length, -1)] - ) + (cygrpc.ChannelArgKey.max_receive_message_length, -1)]) self.server_impl = self.tracer.decorate(self.server_impl) @@ -43,8 +50,8 @@ class Server: url = urlparse(woserver) ip = socket.gethostbyname(url.hostname) socket.inet_pton(socket.AF_INET, ip) -
self.conn_mgr.register('WOSERVER', - '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) + self.conn_mgr.register( + 'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) def register_pre_run_handler(self, func): logger.info('Registering {} into server pre_run_handlers'.format(func)) @@ -65,9 +72,11 @@ class Server: def errorhandler(self, exception): if inspect.isclass(exception) and issubclass(exception, Exception): + def wrapper(func): self.error_handlers[exception] = func return func + return wrapper return exception @@ -78,8 +87,12 @@ class Server: def start(self, port=None): handler_class = self.decorate_handler(ServiceHandler) - add_MilvusServiceServicer_to_server(handler_class(conn_mgr=self.conn_mgr, tracer=self.tracer), self.server_impl) - self.server_impl.add_insecure_port("[::]:{}".format(str(port or self._port))) + add_MilvusServiceServicer_to_server( + handler_class(conn_mgr=self.conn_mgr, + tracer=self.tracer, + router=self.router), self.server_impl) + self.server_impl.add_insecure_port("[::]:{}".format( + str(port or self._port))) self.server_impl.start() def run(self, port): diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 1396466568..e26f2bfd74 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -3,9 +3,6 @@ import time import datetime from collections import defaultdict -from sqlalchemy import and_ -from sqlalchemy import exc as sqlalchemy_exc - from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult @@ -15,8 +12,7 @@ from milvus.client import types as Types from mishards import (db, settings, exceptions) from mishards.grpc_utils import mark_grpc_method from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser -from mishards.models import Tables, TableFiles -from mishards.hash_ring import HashRing +from mishards import utilities logger = logging.getLogger(__name__) @@ -24,11 +20,12 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, tracer, *args, **kwargs): + def __init__(self, conn_mgr, tracer, router, *args, **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} self.error_handlers = {} self.tracer = tracer + self.router = router def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) @@ -43,56 +40,9 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): conn.on_connect(metadata=metadata) return conn.conn - def _format_date(self, start, end): - return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) - - def _range_to_date(self, range_obj, metadata=None): - try: - start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') - end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') - assert start < end - except (ValueError, AssertionError): - raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( - range_obj.start_date, range_obj.end_date - ), metadata=metadata) - - return self._format_date(start, end) - - def _get_routing_file_ids(self, table_id, range_array, metadata=None): - # PXU TODO: Implement Thread-local Context - try: - table = db.Session.query(Tables).filter(and_( - Tables.table_id == table_id, - Tables.state != Tables.TO_DELETE - )).first() - except sqlalchemy_exc.SQLAlchemyError as e: - raise
exceptions.DBError(message=str(e), metadata=metadata) - - if not table: - raise exceptions.TableNotFoundError(table_id, metadata=metadata) - files = table.files_to_search(range_array) - - servers = self.conn_mgr.conn_names - logger.info('Available servers: {}'.format(servers)) - - ring = HashRing(servers) - - routing = {} - - for f in files: - target_host = ring.get_node(str(f.id)) - sub = routing.get(target_host, None) - if not sub: - routing[target_host] = { - 'table_id': table_id, - 'file_ids': [] - } - routing[target_host]['file_ids'].append(str(f.id)) - - return routing - def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): - status = status_pb2.Status(error_code=status_pb2.SUCCESS, reason="Success") + status = status_pb2.Status(error_code=status_pb2.SUCCESS, + reason="Success") if not files_n_topk_results: return status, [] @@ -103,10 +53,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if isinstance(files_collection, tuple): status, _ = files_collection return status, [] - for request_pos, each_request_results in enumerate(files_collection.topk_query_result): - request_results[request_pos].extend(each_request_results.query_result_arrays) - request_results[request_pos] = sorted(request_results[request_pos], key=lambda x: x.distance, - reverse=reverse)[:topk] + for request_pos, each_request_results in enumerate( + files_collection.topk_query_result): + request_results[request_pos].extend( + each_request_results.query_result_arrays) + request_results[request_pos] = sorted( + request_results[request_pos], + key=lambda x: x.distance, + reverse=reverse)[:topk] calc_time = time.time() - calc_time logger.info('Merge takes {}'.format(calc_time)) @@ -120,15 +74,27 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): return status, topk_query_result - def _do_query(self, context, table_id, table_meta, vectors, topk, nprobe, range_array=None, **kwargs): + def _do_query(self, + context, + table_id, + table_meta, + vectors, + topk, + nprobe, + range_array=None, + **kwargs): metadata = kwargs.get('metadata', None) - range_array = [self._range_to_date(r, metadata=metadata) for r in range_array] if range_array else None + range_array = [ + utilities.range_to_date(r, metadata=metadata) for r in range_array + ] if range_array else None routing = {} - p_span = None if self.tracer.empty else context.get_active_span().context - with self.tracer.start_span('get_routing', - child_of=p_span): - routing = self._get_routing_file_ids(table_id, range_array, metadata=metadata) + p_span = None if self.tracer.empty else context.get_active_span( + ).context + with self.tracer.start_span('get_routing', child_of=p_span): + routing = self.router.routing(table_id, + range_array=range_array, + metadata=metadata) logger.info('Routing: {}'.format(routing)) metadata = kwargs.get('metadata', None) @@ -139,42 +105,51 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): workers = settings.SEARCH_WORKER_SIZE def search(addr, query_params, vectors, topk, nprobe, **kwargs): - logger.info('Send Search Request: addr={};params={};nq={};topk={};nprobe={}'.format( - addr, query_params, len(vectors), topk, nprobe - )) + logger.info( + 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' + .format(addr, query_params, len(vectors), topk, nprobe)) conn = self.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) - span = span if span else (None if self.tracer.empty else context.get_active_span().context) + span = span if span else 
(None if self.tracer.empty else + context.get_active_span().context) with self.tracer.start_span('search_{}'.format(addr), child_of=span): - ret = conn.search_vectors_in_files(table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy=True) + ret = conn.search_vectors_in_files( + table_name=query_params['table_id'], + file_ids=query_params['file_ids'], + query_records=vectors, + top_k=topk, + nprobe=nprobe, + lazy=True) end = time.time() logger.info('search_vectors_in_files takes: {}'.format(end - start)) all_topk_results.append(ret) - with self.tracer.start_span('do_search', - child_of=p_span) as span: + with self.tracer.start_span('do_search', child_of=p_span) as span: with ThreadPoolExecutor(max_workers=workers) as pool: for addr, params in routing.items(): - res = pool.submit(search, addr, params, vectors, topk, nprobe, span=span) + res = pool.submit(search, + addr, + params, + vectors, + topk, + nprobe, + span=span) rs.append(res) for res in rs: res.result() reverse = table_meta.metric_type == Types.MetricType.IP - with self.tracer.start_span('do_merge', - child_of=p_span): - return self._do_merge(all_topk_results, topk, reverse=reverse, metadata=metadata) + with self.tracer.start_span('do_merge', child_of=p_span): + return self._do_merge(all_topk_results, + topk, + reverse=reverse, + metadata=metadata) def _create_table(self, table_schema): return self.connection().create_table(table_schema) @@ -184,13 +159,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_schema = Parser.parse_proto_TableSchema(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('CreateTable {}'.format(_table_schema['table_name'])) _status = self._create_table(_table_schema) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _has_table(self, table_name, metadata=None): return self.connection(metadata=metadata).has_table(table_name) @@ -200,20 +177,18 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return milvus_pb2.BoolReply( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - bool_reply=False - ) + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + bool_reply=False) logger.info('HasTable {}'.format(_table_name)) - _bool = self._has_table(_table_name, metadata={ - 'resp_class': milvus_pb2.BoolReply}) + _bool = self._has_table(_table_name, + metadata={'resp_class': milvus_pb2.BoolReply}) - return milvus_pb2.BoolReply( - status=status_pb2.Status(error_code=status_pb2.SUCCESS, reason="OK"), - bool_reply=_bool - ) + return milvus_pb2.BoolReply(status=status_pb2.Status( + error_code=status_pb2.SUCCESS, reason="OK"), + bool_reply=_bool) def _delete_table(self, table_name): return self.connection().delete_table(table_name) @@ -223,13 +198,15 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('DropTable {}'.format(_table_name)) 
_status = self._delete_table(_table_name) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _create_index(self, table_name, index): return self.connection().create_index(table_name, index) @@ -239,7 +216,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, unpacks = Parser.parse_proto_IndexParam(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) _table_name, _index = unpacks @@ -248,21 +226,22 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): # TODO: interface create_table incomplete _status = self._create_index(_table_name, _index) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _add_vectors(self, param, metadata=None): - return self.connection(metadata=metadata).add_vectors(None, None, insert_param=param) + return self.connection(metadata=metadata).add_vectors( + None, None, insert_param=param) @mark_grpc_method def Insert(self, request, context): logger.info('Insert') # TODO: The SDK interface add_vectors() could be updated to add a key 'row_id_array' - _status, _ids = self._add_vectors(metadata={ - 'resp_class': milvus_pb2.VectorIds}, param=request) - return milvus_pb2.VectorIds( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - vector_id_array=_ids - ) + _status, _ids = self._add_vectors( + metadata={'resp_class': milvus_pb2.VectorIds}, param=request) + return milvus_pb2.VectorIds(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + vector_id_array=_ids) @mark_grpc_method def Search(self, request, context): @@ -272,22 +251,23 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_name = request.table_name topk = request.topk nprobe = request.nprobe - logger.info('Search {}: topk={} nprobe={}'.format(table_name, topk, nprobe)) + logger.info('Search {}: topk={} nprobe={}'.format( + table_name, topk, nprobe)) - metadata = { - 'resp_class': milvus_pb2.TopKQueryResultList - } + metadata = {'resp_class': milvus_pb2.TopKQueryResultList} if nprobe > self.MAX_NPROBE or nprobe <= 0: - raise exceptions.InvalidArgumentError(message='Invalid nprobe: {}'.format(nprobe), - metadata=metadata) + raise exceptions.InvalidArgumentError( + message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) table_meta = self.table_meta.get(table_name, None) if not table_meta: - status, info = self.connection(metadata=metadata).describe_table(table_name) + status, info = self.connection( + metadata=metadata).describe_table(table_name) if not status.OK(): - raise exceptions.TableNotFoundError(table_name, metadata=metadata) + raise exceptions.TableNotFoundError(table_name, + metadata=metadata) self.table_meta[table_name] = info table_meta = info @@ -304,16 +284,22 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_range_array.append( Range(query_range.start_value, query_range.end_value)) - status, results = self._do_query(context, table_name, table_meta, query_record_array, topk, - nprobe, query_range_array, metadata=metadata) + status, results = self._do_query(context, + table_name, + table_meta, + query_record_array, + topk, + nprobe, + query_range_array, + metadata=metadata) now = time.time() logger.info('SearchVector takes: {}'.format(now - start)) topk_result_list =
milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status.error_code, reason=status.reason), - topk_query_result=results - ) + status=status_pb2.Status(error_code=status.error_code, + reason=status.reason), + topk_query_result=results) return topk_result_list @mark_grpc_method @@ -328,16 +314,14 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return milvus_pb2.TableSchema( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - ) + return milvus_pb2.TableSchema(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), ) - metadata = { - 'resp_class': milvus_pb2.TableSchema - } + metadata = {'resp_class': milvus_pb2.TableSchema} logger.info('DescribeTable {}'.format(_table_name)) - _status, _table = self._describe_table(metadata=metadata, table_name=_table_name) + _status, _table = self._describe_table(metadata=metadata, + table_name=_table_name) if _status.OK(): return milvus_pb2.TableSchema( @@ -345,37 +329,38 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): index_file_size=_table.index_file_size, dimension=_table.dimension, metric_type=_table.metric_type, - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), ) return milvus_pb2.TableSchema( table_name=_table_name, - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), ) def _count_table(self, table_name, metadata=None): - return self.connection(metadata=metadata).get_table_row_count(table_name) + return self.connection( + metadata=metadata).get_table_row_count(table_name) @mark_grpc_method def CountTable(self, request, context): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - status = status_pb2.Status(error_code=_status.code, reason=_status.message) + status = status_pb2.Status(error_code=_status.code, + reason=_status.message) - return milvus_pb2.TableRowCount( - status=status - ) + return milvus_pb2.TableRowCount(status=status) logger.info('CountTable {}'.format(_table_name)) - metadata = { - 'resp_class': milvus_pb2.TableRowCount - } + metadata = {'resp_class': milvus_pb2.TableRowCount} _status, _count = self._count_table(_table_name, metadata=metadata) return milvus_pb2.TableRowCount( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), + status=status_pb2.Status(error_code=_status.code, + reason=_status.message), table_row_count=_count if isinstance(_count, int) else -1) def _get_server_version(self, metadata=None): @@ -387,23 +372,20 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('Cmd: {}'.format(_cmd)) if not _status.OK(): - return milvus_pb2.StringReply( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) + return milvus_pb2.StringReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) - metadata = { - 'resp_class': milvus_pb2.StringReply - } + metadata = {'resp_class': milvus_pb2.StringReply} if _cmd == 'version': _status, _reply = self._get_server_version(metadata=metadata) else: - _status, _reply = self.connection(metadata=metadata).server_status() + _status, _reply = self.connection( + metadata=metadata).server_status() - return milvus_pb2.StringReply( - status=status_pb2.Status(error_code=_status.code, 
reason=_status.message), - string_reply=_reply - ) + return milvus_pb2.StringReply(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + string_reply=_reply) def _show_tables(self, metadata=None): return self.connection(metadata=metadata).show_tables() @@ -411,18 +393,17 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): @mark_grpc_method def ShowTables(self, request, context): logger.info('ShowTables') - metadata = { - 'resp_class': milvus_pb2.TableName - } + metadata = {'resp_class': milvus_pb2.TableName} _status, _results = self._show_tables(metadata=metadata) - return milvus_pb2.TableNameList( - status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_names=_results - ) + return milvus_pb2.TableNameList(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_names=_results) def _delete_by_range(self, table_name, start_date, end_date): - return self.connection().delete_vectors_by_range(table_name, start_date, end_date) + return self.connection().delete_vectors_by_range(table_name, + start_date, + end_date) @mark_grpc_method def DeleteByRange(self, request, context): @@ -430,13 +411,16 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): Parser.parse_proto_DeleteByRangeParam(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) _table_name, _start_date, _end_date = unpacks - logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, _end_date)) + logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, + _end_date)) _status = self._delete_by_range(_table_name, _start_date, _end_date) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _preload_table(self, table_name): return self.connection().preload_table(table_name) @@ -446,11 +430,13 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('PreloadTable {}'.format(_table_name)) _status = self._preload_table(_table_name) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) def _describe_index(self, table_name, metadata=None): return self.connection(metadata=metadata).describe_index(table_name) @@ -460,21 +446,22 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return milvus_pb2.IndexParam( - status=status_pb2.Status(error_code=_status.code, reason=_status.message) - ) + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) - metadata = { - 'resp_class': milvus_pb2.IndexParam - } + metadata = {'resp_class': milvus_pb2.IndexParam} logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self._describe_index(table_name=_table_name, metadata=metadata) + _status, _index_param = self._describe_index(table_name=_table_name, + metadata=metadata) - _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) + _index = 
milvus_pb2.Index(index_type=_index_param._index_type, + nlist=_index_param._nlist) - return milvus_pb2.IndexParam(status=status_pb2.Status(error_code=_status.code, reason=_status.message), - table_name=_table_name, index=_index) + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message), + table_name=_table_name, + index=_index) def _drop_index(self, table_name): return self.connection().drop_index(table_name) @@ -484,8 +471,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _table_name = Parser.parse_proto_TableName(request) if not _status.OK(): - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) logger.info('DropIndex {}'.format(_table_name)) _status = self._drop_index(_table_name) - return status_pb2.Status(error_code=_status.code, reason=_status.message) + return status_pb2.Status(error_code=_status.code, + reason=_status.message) diff --git a/mishards/settings.py b/mishards/settings.py index c9b62717d4..5e81a1a8ad 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -73,12 +73,14 @@ class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) TRACING_TYPE = env.str('TRACING_TYPE', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') class TestingConfig(DefaultConfig): SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') SQL_ECHO = env.bool('SQL_TEST_ECHO', False) TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') + ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') if __name__ == '__main__': diff --git a/mishards/utilities.py b/mishards/utilities.py new file mode 100644 index 0000000000..c08d0d42df --- /dev/null +++ b/mishards/utilities.py @@ -0,0 +1,20 @@ +import datetime +from mishards import exceptions + + +def format_date(self, start, end): + return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, + (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) + + +def range_to_date(self, range_obj, metadata=None): + try: + start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') + end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') + assert start < end + except (ValueError, AssertionError): + raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( + range_obj.start_date, range_obj.end_date), + metadata=metadata) + + return self.format_date(start, end) From fb5e6ab3b809754fd425770fd5cf48a704135ad0 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:46:09 +0800 Subject: [PATCH 071/196] refactor max workers in handler --- mishards/service_handler.py | 8 ++++---- mishards/settings.py | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index e26f2bfd74..669d96802a 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -3,6 +3,7 @@ import time import datetime from collections import defaultdict +import multiprocessing from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult @@ -20,12 +21,13 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 - def __init__(self, conn_mgr, tracer, router, *args, **kwargs): + def __init__(self, 
conn_mgr, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): self.conn_mgr = conn_mgr self.table_meta = {} self.error_handlers = {} self.tracer = tracer self.router = router + self.max_workers = max_workers def connection(self, metadata=None): conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) @@ -102,8 +104,6 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): rs = [] all_topk_results = [] - workers = settings.SEARCH_WORKER_SIZE - def search(addr, query_params, vectors, topk, nprobe, **kwargs): logger.info( 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' @@ -130,7 +130,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): all_topk_results.append(ret) with self.tracer.start_span('do_search', child_of=p_span) as span: - with ThreadPoolExecutor(max_workers=workers) as pool: + with ThreadPoolExecutor(max_workers=self.max_workers) as pool: for addr, params in routing.items(): res = pool.submit(search, addr, diff --git a/mishards/settings.py b/mishards/settings.py index 5e81a1a8ad..fd07d9d436 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -23,7 +23,6 @@ config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) -SEARCH_WORKER_SIZE = env.int('SEARCH_WORKER_SIZE', 10) SERVER_PORT = env.int('SERVER_PORT', 19530) WOSERVER = env.str('WOSERVER') From bafa336410619817bb733c805f90ba3428c4cdf1 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:55:22 +0800 Subject: [PATCH 072/196] change retry count logic --- mishards/connections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/connections.py b/mishards/connections.py index 7db271381c..915454711f 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -44,7 +44,7 @@ class Connection: if self.on_retry_func: self.on_retry_func(self) else: - logger.warning('{} is retrying {}'.format(self, self.retried)) + self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) def on_connect(self, metadata=None): while not self.connected and self.can_retry: From 3fb602c83fffea7dd39dd46cdd93a00b3ed98c32 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:55:34 +0800 Subject: [PATCH 073/196] change log format --- utils/logger_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/logger_helper.py b/utils/logger_helper.py index 55ce3206ab..b4e3b9c5b6 100644 --- a/utils/logger_helper.py +++ b/utils/logger_helper.py @@ -73,10 +73,10 @@ def config(log_level, log_path, name, tz='UTC'): 'disable_existing_loggers': False, 'formatters': { 'default': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)' + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', }, 'colorful_console': { - 'format': '[%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)', + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', '()': ColorfulFormatter, }, }, From 4231328e0e75cdcc4cba55e2f340c09d40e5d34f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 13:57:57 +0800 Subject: [PATCH 074/196] small code changes for logging --- mishards/__init__.py | 1 - sd/kubernetes_provider.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/mishards/__init__.py b/mishards/__init__.py index 759e8c2e5a..7db3d8cb5e 100644 --- a/mishards/__init__.py +++
b/mishards/__init__.py @@ -12,7 +12,6 @@ grpc_server = Server() def create_app(testing_config=None): config = testing_config if testing_config else settings.DefaultConfig db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) - logger.info(db) from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index 9a15b2fa78..ca593a3682 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -170,7 +170,7 @@ class EventHandler(threading.Thread): event['pod'])) return elif try_cnt <= 0 and not pod.status.pod_ip: - logger.warn('NoPodIPFoundError') + logger.warning('NoPodIPFoundError') return logger.info('Register POD {} with IP {}'.format( From 2b8a6f43debb99e904968fb13cc351b5d0b32dbd Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 16:26:53 +0800 Subject: [PATCH 075/196] set test sql uri default value --- mishards/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/settings.py b/mishards/settings.py index fd07d9d436..773c04f083 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -76,7 +76,7 @@ class DefaultConfig: class TestingConfig(DefaultConfig): - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI') + SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '') SQL_ECHO = env.bool('SQL_TEST_ECHO', False) TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') From 9b2a9193908443f1a5c545cc01b5e5953e969383 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 17:12:30 +0800 Subject: [PATCH 076/196] ignore pyc files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 8919efeb01..60d9da8c38 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ .env .coverage +*.pyc cov_html/ __pycache__/ From c40b72df960b464756c62e52a9a18c89e3c3a40b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 17:12:58 +0800 Subject: [PATCH 077/196] change heartbeat log --- mishards/connections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/connections.py b/mishards/connections.py index 915454711f..618690a099 100644 --- a/mishards/connections.py +++ b/mishards/connections.py @@ -114,6 +114,7 @@ class ConnectionMgr: return rconn def on_new_meta(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) self.metas[name] = url def on_duplicate_meta(self, name, url): @@ -139,7 +140,6 @@ class ConnectionMgr: logger.warning('Non-existed meta: {}'.format(name)) def register(self, name, url): - logger.info('Register Connection: name={};url={}'.format(name, url)) meta = self.metas.get(name) if not meta: return self.on_new_meta(name, url) From bdff52021d115facf1a6f4ce8c54759b370e1a60 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 18 Oct 2019 17:13:28 +0800 Subject: [PATCH 078/196] db session bug fix for multi-threading scenario --- mishards/db_base.py | 3 +++ mishards/routings.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/mishards/db_base.py b/mishards/db_base.py index 6fb3aef4e1..5f2eee9ba1 100644 --- a/mishards/db_base.py +++ b/mishards/db_base.py @@ -42,6 +42,9 @@ class DB: def Session(self): return self.session_factory() + def remove_session(self): + self.session_factory.remove() + def drop_all(self): self.Model.metadata.drop_all(self.engine) diff --git a/mishards/routings.py b/mishards/routings.py index 
a61352f40b..f04f3d2484 100644 --- a/mishards/routings.py +++ b/mishards/routings.py @@ -53,6 +53,7 @@ class FileBasedHashRingRouter(RouterMixin): def _route(self, table_name, range_array, metadata=None, **kwargs): # PXU TODO: Implement Thread-local Context + # PXU TODO: Session life mgt try: table = db.Session.query(Tables).filter( and_(Tables.table_id == table_name, @@ -63,6 +64,7 @@ class FileBasedHashRingRouter(RouterMixin): if not table: raise exceptions.TableNotFoundError(table_name, metadata=metadata) files = table.files_to_search(range_array) + db.remove_session() servers = self.conn_mgr.conn_names logger.info('Available servers: {}'.format(servers)) From 46210920818662372a22d184823dd0370cbf7f27 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 11:21:53 +0800 Subject: [PATCH 079/196] remove conn_mgr from handler --- mishards/routings.py | 13 ++++++++++ mishards/server.py | 3 +-- mishards/service_handler.py | 48 +++++++++++++------------------------ 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/mishards/routings.py b/mishards/routings.py index f04f3d2484..823972726f 100644 --- a/mishards/routings.py +++ b/mishards/routings.py @@ -39,6 +39,19 @@ class RouterMixin: def routing(self, table_name, metadata=None, **kwargs): raise NotImplementedError() + def connection(self, metadata=None): + conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) + if conn: + conn.on_connect(metadata=metadata) + return conn.conn + + def query_conn(self, name, metadata=None): + conn = self.conn_mgr.conn(name, metadata=metadata) + if not conn: + raise exceptions.ConnectionNotFoundError(name, metadata=metadata) + conn.on_connect(metadata=metadata) + return conn.conn + @RouteManager.register_router_class class FileBasedHashRingRouter(RouterMixin): diff --git a/mishards/server.py b/mishards/server.py index 20be8f1746..6eb0e92582 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -88,8 +88,7 @@ class Server: def start(self, port=None): handler_class = self.decorate_handler(ServiceHandler) add_MilvusServiceServicer_to_server( - handler_class(conn_mgr=self.conn_mgr, - tracer=self.tracer, + handler_class(tracer=self.tracer, router=self.router), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format( str(port or self._port))) self.server_impl.start() def run(self, port): diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 669d96802a..04e74415a1 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -21,27 +21,13 @@ logger = logging.getLogger(__name__) class
ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' .format(addr, query_params, len(vectors), topk, nprobe)) - conn = self.query_conn(addr, metadata=metadata) + conn = self.router.query_conn(addr, metadata=metadata) start = time.time() span = kwargs.get('span', None) span = span if span else (None if self.tracer.empty else @@ -152,7 +138,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): metadata=metadata) def _create_table(self, table_schema): - return self.connection().create_table(table_schema) + return self.router.connection().create_table(table_schema) @mark_grpc_method def CreateTable(self, request, context): @@ -170,7 +156,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _has_table(self, table_name, metadata=None): - return self.connection(metadata=metadata).has_table(table_name) + return self.router.connection(metadata=metadata).has_table(table_name) @mark_grpc_method def HasTable(self, request, context): @@ -191,7 +177,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): bool_reply=_bool) def _delete_table(self, table_name): - return self.connection().delete_table(table_name) + return self.router.connection().delete_table(table_name) @mark_grpc_method def DropTable(self, request, context): @@ -209,7 +195,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _create_index(self, table_name, index): - return self.connection().create_index(table_name, index) + return self.router.connection().create_index(table_name, index) @mark_grpc_method def CreateIndex(self, request, context): @@ -230,7 +216,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _add_vectors(self, param, metadata=None): - return self.connection(metadata=metadata).add_vectors( + return self.router.connection(metadata=metadata).add_vectors( None, None, insert_param=param) @mark_grpc_method @@ -263,7 +249,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_meta = self.table_meta.get(table_name, None) if not table_meta: - status, info = self.connection( + status, info = self.router.connection( metadata=metadata).describe_table(table_name) if not status.OK(): raise exceptions.TableNotFoundError(table_name, @@ -307,7 +293,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): raise NotImplemented() def _describe_table(self, table_name, metadata=None): - return self.connection(metadata=metadata).describe_table(table_name) + return self.router.connection(metadata=metadata).describe_table(table_name) @mark_grpc_method def DescribeTable(self, request, context): @@ -340,7 +326,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): ) def _count_table(self, table_name, metadata=None): - return self.connection( + return self.router.connection( metadata=metadata).get_table_row_count(table_name) @mark_grpc_method @@ -364,7 +350,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_row_count=_count if isinstance(_count, int) else -1) def _get_server_version(self, metadata=None): - return self.connection(metadata=metadata).server_version() + return self.router.connection(metadata=metadata).server_version() @mark_grpc_method def Cmd(self, request, context): @@ -380,7 +366,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): if _cmd == 'version': _status, _reply = self._get_server_version(metadata=metadata) else: - _status, _reply = 
self.connection( + _status, _reply = self.router.connection( metadata=metadata).server_status() return milvus_pb2.StringReply(status=status_pb2.Status( @@ -388,7 +374,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): string_reply=_reply) def _show_tables(self, metadata=None): - return self.connection(metadata=metadata).show_tables() + return self.router.connection(metadata=metadata).show_tables() @mark_grpc_method def ShowTables(self, request, context): @@ -401,7 +387,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): table_names=_results) def _delete_by_range(self, table_name, start_date, end_date): - return self.connection().delete_vectors_by_range(table_name, + return self.router.connection().delete_vectors_by_range(table_name, start_date, end_date) @@ -423,7 +409,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _preload_table(self, table_name): - return self.connection().preload_table(table_name) + return self.router.connection().preload_table(table_name) @mark_grpc_method def PreloadTable(self, request, context): @@ -439,7 +425,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): reason=_status.message) def _describe_index(self, table_name, metadata=None): - return self.connection(metadata=metadata).describe_index(table_name) + return self.router.connection(metadata=metadata).describe_index(table_name) @mark_grpc_method def DescribeIndex(self, request, context): @@ -464,7 +450,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): index=_index) def _drop_index(self, table_name): - return self.connection().drop_index(table_name) + return self.router.connection().drop_index(table_name) @mark_grpc_method def DropIndex(self, request, context): From 43bc2cc60c8b1c5428cb990f7300c91f81a63ead Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 11:21:53 +0800 Subject: [PATCH 080/196] remove conn_mgr from handler remove conn_mgr from handler --- mishards/service_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 04e74415a1..485aa8b211 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -388,8 +388,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): def _delete_by_range(self, table_name, start_date, end_date): return self.router.connection().delete_vectors_by_range(table_name, - start_date, - end_date) + start_date, + end_date) @mark_grpc_method def DeleteByRange(self, request, context): From 3ddd181dd2225c1166d3989249d984ae7677538a Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 14:02:48 +0800 Subject: [PATCH 081/196] update for better test --- conftest.py | 2 +- mishards/server.py | 2 +- mishards/settings.py | 7 +++++-- sd/kubernetes_provider.py | 6 ++++-- sd/static_provider.py | 6 ++++-- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/conftest.py b/conftest.py index ebe8276cea..34e22af693 100644 --- a/conftest.py +++ b/conftest.py @@ -20,7 +20,7 @@ def app(request): @pytest.fixture def started_app(app): app.on_pre_run() - app.start(app.port) + app.start(settings.SERVER_TEST_PORT) yield app diff --git a/mishards/server.py b/mishards/server.py index 6eb0e92582..599a00e455 100644 --- a/mishards/server.py +++ b/mishards/server.py @@ -91,7 +91,7 @@ class Server: handler_class(tracer=self.tracer, router=self.router), self.server_impl) self.server_impl.add_insecure_port("[::]:{}".format( - str(port or self._port))) + str(port 
or self.port))) self.server_impl.start() def run(self, port): diff --git a/mishards/settings.py b/mishards/settings.py index 773c04f083..21a3bb7a65 100644 --- a/mishards/settings.py +++ b/mishards/settings.py @@ -25,6 +25,7 @@ TIMEOUT = env.int('TIMEOUT', 60) MAX_RETRY = env.int('MAX_RETRY', 3) SERVER_PORT = env.int('SERVER_PORT', 19530) +SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) WOSERVER = env.str('WOSERVER') SD_PROVIDER_SETTINGS = None @@ -36,11 +37,13 @@ if SD_PROVIDER == 'Kubernetes': in_cluster=env.bool('SD_IN_CLUSTER', False), poll_interval=env.int('SD_POLL_INTERVAL', 5), pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', '')) + label_selector=env.str('SD_LABEL_SELECTOR', ''), + port=env.int('SD_PORT', 19530)) elif SD_PROVIDER == 'Static': from sd.static_provider import StaticProviderSettings SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', [])) + hosts=env.list('SD_STATIC_HOSTS', []), + port=env.int('SD_STATIC_PORT', 19530)) # TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py index ca593a3682..eb113db007 100644 --- a/sd/kubernetes_provider.py +++ b/sd/kubernetes_provider.py @@ -226,12 +226,13 @@ class EventHandler(threading.Thread): class KubernetesProviderSettings: def __init__(self, namespace, pod_patt, label_selector, in_cluster, - poll_interval, **kwargs): + poll_interval, port=None, **kwargs): self.namespace = namespace self.pod_patt = pod_patt self.label_selector = label_selector self.in_cluster = in_cluster self.poll_interval = poll_interval + self.port = int(port) if port else 19530 @singleton @@ -245,6 +246,7 @@ class KubernetesProvider(object): self.label_selector = settings.label_selector self.in_cluster = settings.in_cluster self.poll_interval = settings.poll_interval + self.port = settings.port self.kwargs = kwargs self.queue = queue.Queue() @@ -279,7 +281,7 @@ class KubernetesProvider(object): **kwargs) def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) def delete_pod(self, name): self.conn_mgr.unregister(name) diff --git a/sd/static_provider.py b/sd/static_provider.py index 5c97c4efd0..e88780740f 100644 --- a/sd/static_provider.py +++ b/sd/static_provider.py @@ -9,8 +9,9 @@ from sd import ProviderManager class StaticProviderSettings: - def __init__(self, hosts): + def __init__(self, hosts, port=None): self.hosts = hosts + self.port = int(port) if port else 19530 @singleton @@ -21,6 +22,7 @@ class KubernetesProvider(object): def __init__(self, settings, conn_mgr, **kwargs): self.conn_mgr = conn_mgr self.hosts = [socket.gethostbyname(host) for host in settings.hosts] + self.port = settings.port def start(self): for host in self.hosts: @@ -31,7 +33,7 @@ class KubernetesProvider(object): self.delete_pod(host) def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:19530'.format(ip)) + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) def delete_pod(self, name): self.conn_mgr.unregister(name) From 9dc45d650c713caa8876b7693d526e66922db629 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 14:03:06 +0800 Subject: [PATCH 082/196] update test_server --- mishards/test_server.py | 70 ++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/mishards/test_server.py b/mishards/test_server.py index 
a2677847da..2f24a1167b 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -13,6 +13,7 @@ from mishards import db, create_app, settings from mishards.service_handler import ServiceHandler from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables +from mishards.routings import RouterMixin logger = logging.getLogger(__name__) @@ -22,9 +23,10 @@ BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') @pytest.mark.usefixtures('started_app') class TestServer: - def client(self, port): + @property + def client(self): m = Milvus() - m.connect(host='localhost', port=port) + m.connect(host='localhost', port=settings.SERVER_TEST_PORT) return m def test_server_start(self, started_app): @@ -33,22 +35,22 @@ class TestServer: def test_cmd(self, started_app): ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, '')) - status, _ = self.client(started_app.port).server_version() + status, _ = self.client.server_version() assert status.OK() Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) - status, _ = self.client(started_app.port).server_version() + status, _ = self.client.server_version() assert not status.OK() def test_drop_index(self, started_app): table_name = inspect.currentframe().f_code.co_name ServiceHandler._drop_index = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).drop_index(table_name) + status = self.client.drop_index(table_name) assert status.OK() Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status = self.client(started_app.port).drop_index(table_name) + status = self.client.drop_index(table_name) assert not status.OK() def test_describe_index(self, started_app): @@ -62,13 +64,13 @@ class TestServer: return_value=(OK, table_name)) ServiceHandler._describe_index = mock.MagicMock( return_value=(OK, index_param)) - status, ret = self.client(started_app.port).describe_index(table_name) + status, ret = self.client.describe_index(table_name) assert status.OK() assert ret._table_name == index_param._table_name Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status, _ = self.client(started_app.port).describe_index(table_name) + status, _ = self.client.describe_index(table_name) assert not status.OK() def test_preload(self, started_app): @@ -77,12 +79,12 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._preload_table = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).preload_table(table_name) + status = self.client.preload_table(table_name) assert status.OK() Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status = self.client(started_app.port).preload_table(table_name) + status = self.client.preload_table(table_name) assert not status.OK() def test_delete_by_range(self, started_app): @@ -94,13 +96,13 @@ class TestServer: Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( return_value=(OK, unpacked)) ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).delete_vectors_by_range( + status = self.client.delete_vectors_by_range( *unpacked) assert status.OK() Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( return_value=(BAD, unpacked)) - status = self.client(started_app.port).delete_vectors_by_range( + status = self.client.delete_vectors_by_range( *unpacked) assert not 
status.OK() @@ -111,21 +113,19 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) - status, ret = self.client( - started_app.port).get_table_row_count(table_name) + status, ret = self.client.get_table_row_count(table_name) assert status.OK() assert ret == count Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status, _ = self.client( - started_app.port).get_table_row_count(table_name) + status, _ = self.client.get_table_row_count(table_name) assert not status.OK() def test_show_tables(self, started_app): tables = ['t1', 't2'] ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) - status, ret = self.client(started_app.port).show_tables() + status, ret = self.client.show_tables() assert status.OK() assert ret == tables @@ -141,17 +141,17 @@ class TestServer: return_value=(OK, table_schema.table_name)) ServiceHandler._describe_table = mock.MagicMock( return_value=(OK, table_schema)) - status, _ = self.client(started_app.port).describe_table(table_name) + status, _ = self.client.describe_table(table_name) assert status.OK() ServiceHandler._describe_table = mock.MagicMock( return_value=(BAD, table_schema)) - status, _ = self.client(started_app.port).describe_table(table_name) + status, _ = self.client.describe_table(table_name) assert not status.OK() Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, 'cmd')) - status, ret = self.client(started_app.port).describe_table(table_name) + status, ret = self.client.describe_table(table_name) assert not status.OK() def test_insert(self, started_app): @@ -159,7 +159,7 @@ class TestServer: vectors = [[random.random() for _ in range(16)] for _ in range(10)] ids = [random.randint(1000000, 20000000) for _ in range(10)] ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) - status, ret = self.client(started_app.port).add_vectors( + status, ret = self.client.add_vectors( table_name=table_name, records=vectors) assert status.OK() assert ids == ret @@ -170,14 +170,12 @@ class TestServer: Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, unpacks)) ServiceHandler._create_index = mock.MagicMock(return_value=OK) - status = self.client( - started_app.port).create_index(table_name=table_name) + status = self.client.create_index(table_name=table_name) assert status.OK() Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, None)) - status = self.client( - started_app.port).create_index(table_name=table_name) + status = self.client.create_index(table_name=table_name) assert not status.OK() def test_drop_table(self, started_app): @@ -186,14 +184,12 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._delete_table = mock.MagicMock(return_value=OK) - status = self.client( - started_app.port).delete_table(table_name=table_name) + status = self.client.delete_table(table_name=table_name) assert status.OK() Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - status = self.client( - started_app.port).delete_table(table_name=table_name) + status = self.client.delete_table(table_name=table_name) assert not status.OK() def test_has_table(self, started_app): @@ -202,12 +198,12 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) ServiceHandler._has_table = mock.MagicMock(return_value=True) - has = 
self.client(started_app.port).has_table(table_name=table_name) + has = self.client.has_table(table_name=table_name) assert has Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - has = self.client(started_app.port).has_table(table_name=table_name) + has = self.client.has_table(table_name=table_name) assert not has def test_create_table(self, started_app): @@ -219,12 +215,12 @@ class TestServer: dimension=dimension) ServiceHandler._create_table = mock.MagicMock(return_value=OK) - status = self.client(started_app.port).create_table(table_schema) + status = self.client.create_table(table_schema) assert status.OK() Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, None)) - status = self.client(started_app.port).create_table(table_schema) + status = self.client.create_table(table_schema) assert not status.OK() def random_data(self, n, dimension): @@ -261,19 +257,21 @@ class TestServer: metric_type=table.metric_type, dimension=table.dimension) - status, _ = self.client(started_app.port).search_vectors(**param) + status, _ = self.client.search_vectors(**param) assert status.code == Status.ILLEGAL_ARGUMENT param['nprobe'] = 2048 + RouterMixin.connection = mock.MagicMock(return_value=Milvus()) + RouterMixin.query_conn = mock.MagicMock(return_value=Milvus()) Milvus.describe_table = mock.MagicMock(return_value=(BAD, table_schema)) - status, ret = self.client(started_app.port).search_vectors(**param) + status, ret = self.client.search_vectors(**param) assert status.code == Status.TABLE_NOT_EXISTS Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) Milvus.search_vectors_in_files = mock.MagicMock( return_value=mock_results) - status, ret = self.client(started_app.port).search_vectors(**param) + status, ret = self.client.search_vectors(**param) assert status.OK() assert len(ret) == nq From 4efa4506a99e044cd6a3d39e7713f1ef78fc4877 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 19 Oct 2019 14:06:35 +0800 Subject: [PATCH 083/196] update .env.example --- mishards/.env.example | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mishards/.env.example b/mishards/.env.example index bfea0a3edc..0a23c0cf56 100644 --- a/mishards/.env.example +++ b/mishards/.env.example @@ -1,8 +1,8 @@ DEBUG=True WOSERVER=tcp://127.0.0.1:19530 -TESTING_WOSERVER=tcp://127.0.0.1:19530 SERVER_PORT=19532 +SERVER_TEST_PORT=19888 SD_PROVIDER=Static @@ -13,16 +13,17 @@ SD_ROSERVER_POD_PATT=.*-ro-servers-.* SD_LABEL_SELECTOR=tier=ro-servers SD_STATIC_HOSTS=127.0.0.1 +SD_STATIC_PORT=19530 #SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True -TESTING=False #SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_TEST_ECHO=False +# TRACING_TEST_TYPE=jaeger TRACING_TYPE=jaeger TRACING_SERVICE_NAME=fortest TRACING_SAMPLER_TYPE=const From a27eef278b538ed21010a0719885c49c7ec597e2 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 11:42:54 +0800 Subject: [PATCH 084/196] update for new sdk --- mishards/service_handler.py | 4 ++-- mishards/test_server.py | 6 ++++-- requirements.txt | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 485aa8b211..4519afbaa0 100644 --- a/mishards/service_handler.py +++ 
b/mishards/service_handler.py @@ -7,7 +7,7 @@ import multiprocessing from concurrent.futures import ThreadPoolExecutor from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 from milvus.grpc_gen.milvus_pb2 import TopKQueryResult -from milvus.client.Abstract import Range +from milvus.client.abstract import Range from milvus.client import types as Types from mishards import (db, settings, exceptions) @@ -109,7 +109,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): query_records=vectors, top_k=topk, nprobe=nprobe, - lazy=True) + lazy_=True) end = time.time() logger.info('search_vectors_in_files takes: {}'.format(end - start)) diff --git a/mishards/test_server.py b/mishards/test_server.py index 2f24a1167b..a7fec615c9 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -7,7 +7,7 @@ import faker import inspect from milvus import Milvus from milvus.client.types import Status, IndexType, MetricType -from milvus.client.Abstract import IndexParam, TableSchema +from milvus.client.abstract import IndexParam, TableSchema from milvus.grpc_gen import status_pb2, milvus_pb2 from mishards import db, create_app, settings from mishards.service_handler import ServiceHandler @@ -87,6 +87,7 @@ class TestServer: status = self.client.preload_table(table_name) assert not status.OK() + @pytest.mark.skip def test_delete_by_range(self, started_app): table_name = inspect.currentframe().f_code.co_name @@ -203,7 +204,8 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(BAD, table_name)) - has = self.client.has_table(table_name=table_name) + status, has = self.client.has_table(table_name=table_name) + assert not status.OK() assert not has def test_create_table(self, started_app): diff --git a/requirements.txt b/requirements.txt index 133cfac8ab..ae224e92ed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ py==1.8.0 pyasn1==0.4.7 pyasn1-modules==0.2.6 pylint==2.3.1 -pymilvus-test==0.2.21 +pymilvus-test==0.2.28 #pymilvus==0.2.0 pyparsing==2.4.0 pytest==4.6.3 From 703371efa379c9eba1c0c36004db25e7e9b22521 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 12:08:45 +0800 Subject: [PATCH 085/196] check return index param in DescribeIndex --- mishards/service_handler.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 4519afbaa0..0c6b41ece6 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -441,6 +441,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): _status, _index_param = self._describe_index(table_name=_table_name, metadata=metadata) + if not _index_param: + return milvus_pb2.IndexParam(status=status_pb2.Status( + error_code=_status.code, reason=_status.message)) + _index = milvus_pb2.Index(index_type=_index_param._index_type, nlist=_index_param._nlist) From 26b3adfcc37d4b0e18b953786d47f9fcb39c89a3 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 14:34:12 +0800 Subject: [PATCH 086/196] update for new sdk changes --- mishards/service_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 0c6b41ece6..44e1d8cf7b 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -169,11 +169,11 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): logger.info('HasTable {}'.format(_table_name)) - _bool = self._has_table(_table_name, + _status, _bool = self._has_table(_table_name, 
metadata={'resp_class': milvus_pb2.BoolReply}) return milvus_pb2.BoolReply(status=status_pb2.Status( - error_code=status_pb2.SUCCESS, reason="OK"), + error_code=_status.code, reason=_status.message), bool_reply=_bool) def _delete_table(self, table_name): From c4a5c5c69b5f2bb4d8b7f016e230a74d5ddfd2d5 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 14:58:39 +0800 Subject: [PATCH 087/196] bug fix for time range and topk check in search --- mishards/exception_codes.py | 1 + mishards/exception_handlers.py | 6 ++++++ mishards/exceptions.py | 4 ++++ mishards/service_handler.py | 5 +++++ mishards/utilities.py | 6 +++--- 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py index ecb2469562..bdd4572dd5 100644 --- a/mishards/exception_codes.py +++ b/mishards/exception_codes.py @@ -7,3 +7,4 @@ DB_ERROR_CODE = 10003 TABLE_NOT_FOUND_CODE = 20001 INVALID_ARGUMENT_CODE = 20002 INVALID_DATE_RANGE_CODE = 20003 +INVALID_TOPK_CODE = 20004 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py index 1e5ffb3529..c79a6db5a3 100644 --- a/mishards/exception_handlers.py +++ b/mishards/exception_handlers.py @@ -58,6 +58,12 @@ def TableNotFoundErrorHandler(err): return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) +@server.errorhandler(exceptions.InvalidTopKError) +def InvalidTopKErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_TOPK) + + @server.errorhandler(exceptions.InvalidArgumentError) def InvalidArgumentErrorHandler(err): logger.error(err) diff --git a/mishards/exceptions.py b/mishards/exceptions.py index acd9372d6a..72839f88d2 100644 --- a/mishards/exceptions.py +++ b/mishards/exceptions.py @@ -26,6 +26,10 @@ class TableNotFoundError(BaseException): code = codes.TABLE_NOT_FOUND_CODE +class InvalidTopKError(BaseException): + code = codes.INVALID_TOPK_CODE + + class InvalidArgumentError(BaseException): code = codes.INVALID_ARGUMENT_CODE diff --git a/mishards/service_handler.py b/mishards/service_handler.py index 44e1d8cf7b..5e91c14f14 100644 --- a/mishards/service_handler.py +++ b/mishards/service_handler.py @@ -20,6 +20,7 @@ logger = logging.getLogger(__name__) class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): MAX_NPROBE = 2048 + MAX_TOPK = 2048 def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): self.table_meta = {} @@ -246,6 +247,10 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): raise exceptions.InvalidArgumentError( message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) + if topk > self.MAX_TOPK or topk <= 0: + raise exceptions.InvalidTopKError( + message='Invalid topk: {}'.format(topk), metadata=metadata) + table_meta = self.table_meta.get(table_name, None) if not table_meta: diff --git a/mishards/utilities.py b/mishards/utilities.py index c08d0d42df..42e982b5f1 100644 --- a/mishards/utilities.py +++ b/mishards/utilities.py @@ -2,12 +2,12 @@ import datetime from mishards import exceptions -def format_date(self, start, end): +def format_date(start, end): return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) -def range_to_date(self, range_obj, metadata=None): +def range_to_date(range_obj, metadata=None): try: start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') @@ -17,4 +17,4 @@ def range_to_date(self, range_obj, metadata=None): 
range_obj.start_date, range_obj.end_date), metadata=metadata) - return self.format_date(start, end) + return format_date(start, end) From e47f3ec28a89715745be8949c160e81f416fcd9f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 15:06:58 +0800 Subject: [PATCH 088/196] update to latest image --- start_services.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/start_services.yml b/start_services.yml index c7a3c36f51..57fe061bb7 100644 --- a/start_services.yml +++ b/start_services.yml @@ -21,7 +21,7 @@ services: mishards: restart: always - image: registry.zilliz.com/milvus/mishards:v0.0.3 + image: registry.zilliz.com/milvus/mishards:v0.0.4 ports: - "0.0.0.0:19530:19531" - "0.0.0.0:19532:19532" From 7b0a731e047b571c1154ca0dba37f8be8f867c8d Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 15:45:25 +0800 Subject: [PATCH 089/196] fix bug in test_server --- mishards/test_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mishards/test_server.py b/mishards/test_server.py index a7fec615c9..efd3912076 100644 --- a/mishards/test_server.py +++ b/mishards/test_server.py @@ -198,7 +198,7 @@ class TestServer: Parser.parse_proto_TableName = mock.MagicMock( return_value=(OK, table_name)) - ServiceHandler._has_table = mock.MagicMock(return_value=True) + ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True)) has = self.client.has_table(table_name=table_name) assert has From 9a4c732563323cd8814a11a5eda8891745e264ba Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 16:20:29 +0800 Subject: [PATCH 090/196] fix bug in test_server --- Dockerfile | 10 - build.sh | 39 -- conftest.py | 27 -- manager.py | 28 -- mishards/.env.example | 33 -- mishards/__init__.py | 36 -- mishards/connections.py | 154 -------- mishards/db_base.py | 52 --- mishards/exception_codes.py | 10 - mishards/exception_handlers.py | 82 ---- mishards/exceptions.py | 38 -- mishards/factories.py | 54 --- mishards/grpc_utils/__init__.py | 37 -- mishards/grpc_utils/grpc_args_parser.py | 102 ----- mishards/grpc_utils/grpc_args_wrapper.py | 4 - mishards/grpc_utils/test_grpc.py | 75 ---- mishards/hash_ring.py | 150 ------- mishards/main.py | 15 - mishards/models.py | 76 ---- mishards/routings.py | 96 ----- mishards/server.py | 122 ------ mishards/service_handler.py | 475 ----------------------- mishards/settings.py | 94 ----- mishards/test_connections.py | 101 ----- mishards/test_models.py | 39 -- mishards/test_server.py | 279 ------------- mishards/utilities.py | 20 - requirements.txt | 36 -- sd/__init__.py | 28 -- sd/kubernetes_provider.py | 331 ---------------- sd/static_provider.py | 39 -- setup.cfg | 4 - start_services.yml | 45 --- tracing/__init__.py | 43 -- tracing/factory.py | 40 -- utils/__init__.py | 11 - utils/logger_helper.py | 152 -------- 37 files changed, 2977 deletions(-) delete mode 100644 Dockerfile delete mode 100755 build.sh delete mode 100644 conftest.py delete mode 100644 manager.py delete mode 100644 mishards/.env.example delete mode 100644 mishards/__init__.py delete mode 100644 mishards/connections.py delete mode 100644 mishards/db_base.py delete mode 100644 mishards/exception_codes.py delete mode 100644 mishards/exception_handlers.py delete mode 100644 mishards/exceptions.py delete mode 100644 mishards/factories.py delete mode 100644 mishards/grpc_utils/__init__.py delete mode 100644 mishards/grpc_utils/grpc_args_parser.py delete mode 100644 mishards/grpc_utils/grpc_args_wrapper.py delete mode 100644 mishards/grpc_utils/test_grpc.py 
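Patch 087 above also turns format_date and range_to_date into plain module-level functions by dropping their stray self parameters. A minimal standalone sketch of the same date encoding those helpers use; the __main__ values are illustrative, not part of the patch:

import datetime

def format_date(start, end):
    # Pack each date as (year - 1900) * 10000 + (month - 1) * 100 + day,
    # the encoding mishards/utilities.py uses for query date ranges.
    pack = lambda d: (d.year - 1900) * 10000 + (d.month - 1) * 100 + d.day
    return pack(start), pack(end)

if __name__ == '__main__':
    start = datetime.datetime.strptime('2019-10-21', '%Y-%m-%d')
    end = datetime.datetime.strptime('2019-10-22', '%Y-%m-%d')
    assert format_date(start, end) == (1190921, 1190922)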
delete mode 100644 mishards/hash_ring.py delete mode 100644 mishards/main.py delete mode 100644 mishards/models.py delete mode 100644 mishards/routings.py delete mode 100644 mishards/server.py delete mode 100644 mishards/service_handler.py delete mode 100644 mishards/settings.py delete mode 100644 mishards/test_connections.py delete mode 100644 mishards/test_models.py delete mode 100644 mishards/test_server.py delete mode 100644 mishards/utilities.py delete mode 100644 requirements.txt delete mode 100644 sd/__init__.py delete mode 100644 sd/kubernetes_provider.py delete mode 100644 sd/static_provider.py delete mode 100644 setup.cfg delete mode 100644 start_services.yml delete mode 100644 tracing/__init__.py delete mode 100644 tracing/factory.py delete mode 100644 utils/__init__.py delete mode 100644 utils/logger_helper.py diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 594640619e..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM python:3.6 -RUN apt update && apt install -y \ - less \ - telnet -RUN mkdir /source -WORKDIR /source -ADD ./requirements.txt ./ -RUN pip install -r requirements.txt -COPY . . -CMD python mishards/main.py diff --git a/build.sh b/build.sh deleted file mode 100755 index fad30518f2..0000000000 --- a/build.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -BOLD=`tput bold` -NORMAL=`tput sgr0` -YELLOW='\033[1;33m' -ENDC='\033[0m' - -echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" - -function build_image() { - dockerfile=$1 - remote_registry=$2 - tagged=$2 - buildcmd="docker build -t ${tagged} -f ${dockerfile} ." - echo -e "${BOLD}$buildcmd${NORMAL}" - $buildcmd - pushcmd="docker push ${remote_registry}" - echo -e "${BOLD}$pushcmd${NORMAL}" - $pushcmd - echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" -} - -case "$1" in - -all) - [[ -z $MISHARDS_REGISTRY ]] && { - echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" - exit 1 - } - - version="" - [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" - ;; -*) - echo "Usage: [option...] 
{base | apps}" - echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" - ;; -esac diff --git a/conftest.py b/conftest.py deleted file mode 100644 index 34e22af693..0000000000 --- a/conftest.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging -import pytest -import grpc -from mishards import settings, db, create_app - -logger = logging.getLogger(__name__) - - -@pytest.fixture -def app(request): - app = create_app(settings.TestingConfig) - db.drop_all() - db.create_all() - - yield app - - db.drop_all() - - -@pytest.fixture -def started_app(app): - app.on_pre_run() - app.start(settings.SERVER_TEST_PORT) - - yield app - - app.stop() diff --git a/manager.py b/manager.py deleted file mode 100644 index 931c90ebc8..0000000000 --- a/manager.py +++ /dev/null @@ -1,28 +0,0 @@ -import fire -from mishards import db -from sqlalchemy import and_ - - -class DBHandler: - @classmethod - def create_all(cls): - db.create_all() - - @classmethod - def drop_all(cls): - db.drop_all() - - @classmethod - def fun(cls, tid): - from mishards.factories import TablesFactory, TableFilesFactory, Tables - f = db.Session.query(Tables).filter(and_( - Tables.table_id == tid, - Tables.state != Tables.TO_DELETE) - ).first() - print(f) - - # f1 = TableFilesFactory() - - -if __name__ == '__main__': - fire.Fire(DBHandler) diff --git a/mishards/.env.example b/mishards/.env.example deleted file mode 100644 index 0a23c0cf56..0000000000 --- a/mishards/.env.example +++ /dev/null @@ -1,33 +0,0 @@ -DEBUG=True - -WOSERVER=tcp://127.0.0.1:19530 -SERVER_PORT=19532 -SERVER_TEST_PORT=19888 - -SD_PROVIDER=Static - -SD_NAMESPACE=xp -SD_IN_CLUSTER=False -SD_POLL_INTERVAL=5 -SD_ROSERVER_POD_PATT=.*-ro-servers-.* -SD_LABEL_SELECTOR=tier=ro-servers - -SD_STATIC_HOSTS=127.0.0.1 -SD_STATIC_PORT=19530 - -#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 -SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False -SQL_ECHO=True - -#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 -SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False -SQL_TEST_ECHO=False - -# TRACING_TEST_TYPE=jaeger -TRACING_TYPE=jaeger -TRACING_SERVICE_NAME=fortest -TRACING_SAMPLER_TYPE=const -TRACING_SAMPLER_PARAM=1 -TRACING_LOG_PAYLOAD=True -#TRACING_SAMPLER_TYPE=probabilistic -#TRACING_SAMPLER_PARAM=0.5 diff --git a/mishards/__init__.py b/mishards/__init__.py deleted file mode 100644 index 7db3d8cb5e..0000000000 --- a/mishards/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging -from mishards import settings -logger = logging.getLogger() - -from mishards.db_base import DB -db = DB() - -from mishards.server import Server -grpc_server = Server() - - -def create_app(testing_config=None): - config = testing_config if testing_config else settings.DefaultConfig - db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) - - from mishards.connections import ConnectionMgr - connect_mgr = ConnectionMgr() - - from sd import ProviderManager - - sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) - discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) - - from tracing.factory import TracerFactory - from mishards.grpc_utils import GrpcSpanDecorator - tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) - - from mishards.routings import RouterFactory - router = 
RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) - - grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) - - from mishards import exception_handlers - - return grpc_server diff --git a/mishards/connections.py b/mishards/connections.py deleted file mode 100644 index 618690a099..0000000000 --- a/mishards/connections.py +++ /dev/null @@ -1,154 +0,0 @@ -import logging -import threading -from functools import wraps -from milvus import Milvus - -from mishards import (settings, exceptions) -from utils import singleton - -logger = logging.getLogger(__name__) - - -class Connection: - def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): - self.name = name - self.uri = uri - self.max_retry = max_retry - self.retried = 0 - self.conn = Milvus() - self.error_handlers = [] if not error_handlers else error_handlers - self.on_retry_func = kwargs.get('on_retry_func', None) - # self._connect() - - def __str__(self): - return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) - - def _connect(self, metadata=None): - try: - self.conn.connect(uri=self.uri) - except Exception as e: - if not self.error_handlers: - raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) - for handler in self.error_handlers: - handler(e, metadata=metadata) - - @property - def can_retry(self): - return self.retried < self.max_retry - - @property - def connected(self): - return self.conn.connected() - - def on_retry(self): - if self.on_retry_func: - self.on_retry_func(self) - else: - self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) - - def on_connect(self, metadata=None): - while not self.connected and self.can_retry: - self.retried += 1 - self.on_retry() - self._connect(metadata=metadata) - - if not self.can_retry and not self.connected: - raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry), - metadata=metadata) - - self.retried = 0 - - def connect(self, func, exception_handler=None): - @wraps(func) - def inner(*args, **kwargs): - self.on_connect() - try: - return func(*args, **kwargs) - except Exception as e: - if exception_handler: - exception_handler(e) - else: - raise e - return inner - - -@singleton -class ConnectionMgr: - def __init__(self): - self.metas = {} - self.conns = {} - - @property - def conn_names(self): - return set(self.metas.keys()) - set(['WOSERVER']) - - def conn(self, name, metadata, throw=False): - c = self.conns.get(name, None) - if not c: - url = self.metas.get(name, None) - if not url: - if not throw: - return None - raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), - metadata=metadata) - this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) - threaded = { - threading.get_ident(): this_conn - } - self.conns[name] = threaded - return this_conn - - tid = threading.get_ident() - rconn = c.get(tid, None) - if not rconn: - url = self.metas.get(name, None) - if not url: - if not throw: - return None - raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), - metadata=metadata) - this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) - c[tid] = this_conn - return this_conn - - return rconn - - def on_new_meta(self, name, url): - logger.info('Register Connection: name={};url={}'.format(name, url)) - self.metas[name] = url - - def on_duplicate_meta(self, name, url): - if self.metas[name] == url: - return self.on_same_meta(name, url)
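ConnectionMgr.conn() above caches one Connection per (name, thread id) pair so concurrent gRPC workers never share a Milvus client. A compact sketch of just that per-thread caching pattern; PerThreadConnCache and the object() factory are illustrative stand-ins, not part of the codebase:

import threading

class PerThreadConnCache:
    def __init__(self, factory):
        self.factory = factory  # called as factory(name) on a cache miss
        self.conns = {}         # name -> {thread_id: connection}

    def get(self, name):
        tid = threading.get_ident()
        threaded = self.conns.setdefault(name, {})
        if tid not in threaded:
            threaded[tid] = self.factory(name)
        return threaded[tid]

cache = PerThreadConnCache(factory=lambda name: object())
assert cache.get('pod1') is cache.get('pod1')      # same thread reuses the object
assert cache.get('pod1') is not cache.get('pod2')  # distinct names get distinct objects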
- - return self.on_diff_meta(name, url) - - def on_same_meta(self, name, url): - # logger.warning('Register same meta: {}:{}'.format(name, url)) - pass - - def on_diff_meta(self, name, url): - logger.warning('Received {} with diff url={}'.format(name, url)) - self.metas[name] = url - self.conns[name] = {} - - def on_unregister_meta(self, name, url): - logger.info('Unregister name={};url={}'.format(name, url)) - self.conns.pop(name, None) - - def on_nonexisted_meta(self, name): - logger.warning('Non-existed meta: {}'.format(name)) - - def register(self, name, url): - meta = self.metas.get(name) - if not meta: - return self.on_new_meta(name, url) - else: - return self.on_duplicate_meta(name, url) - - def unregister(self, name): - logger.info('Unregister Connection: name={}'.format(name)) - url = self.metas.pop(name, None) - if url is None: - return self.on_nonexisted_meta(name) - return self.on_unregister_meta(name, url) diff --git a/mishards/db_base.py b/mishards/db_base.py deleted file mode 100644 index 5f2eee9ba1..0000000000 --- a/mishards/db_base.py +++ /dev/null @@ -1,52 +0,0 @@ -import logging -from sqlalchemy import create_engine -from sqlalchemy.engine.url import make_url -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker, scoped_session -from sqlalchemy.orm.session import Session as SessionBase - -logger = logging.getLogger(__name__) - - -class LocalSession(SessionBase): - def __init__(self, db, autocommit=False, autoflush=True, **options): - self.db = db - bind = options.pop('bind', None) or db.engine - SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) - - -class DB: - Model = declarative_base() - - def __init__(self, uri=None, echo=False): - self.echo = echo - uri and self.init_db(uri, echo) - self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) - - def init_db(self, uri, echo=False): - url = make_url(uri) - if url.get_backend_name() == 'sqlite': - self.engine = create_engine(url) - else: - self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, - pool_pre_ping=True, - echo=echo, - max_overflow=0) - self.uri = uri - self.url = url - - def __str__(self): - return '<{}:{}>'.format(self.url.get_backend_name(), self.url.database) - - @property - def Session(self): - return self.session_factory() - - def remove_session(self): - self.session_factory.remove() - - def drop_all(self): - self.Model.metadata.drop_all(self.engine) - - def create_all(self): - self.Model.metadata.create_all(self.engine) diff --git a/mishards/exception_codes.py b/mishards/exception_codes.py deleted file mode 100644 index bdd4572dd5..0000000000 --- a/mishards/exception_codes.py +++ /dev/null @@ -1,10 +0,0 @@ -INVALID_CODE = -1 - -CONNECT_ERROR_CODE = 10001 -CONNECTION_NOT_FOUND_CODE = 10002 -DB_ERROR_CODE = 10003 - -TABLE_NOT_FOUND_CODE = 20001 -INVALID_ARGUMENT_CODE = 20002 -INVALID_DATE_RANGE_CODE = 20003 -INVALID_TOPK_CODE = 20004 diff --git a/mishards/exception_handlers.py b/mishards/exception_handlers.py deleted file mode 100644 index c79a6db5a3..0000000000 --- a/mishards/exception_handlers.py +++ /dev/null @@ -1,82 +0,0 @@ -import logging -from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 -from mishards import grpc_server as server, exceptions - -logger = logging.getLogger(__name__) - - -def resp_handler(err, error_code): - if not isinstance(err, exceptions.BaseException): - return status_pb2.Status(error_code=error_code, reason=str(err)) - - status =
status_pb2.Status(error_code=error_code, reason=err.message) - - if err.metadata is None: - return status - - resp_class = err.metadata.get('resp_class', None) - if not resp_class: - return status - - if resp_class == milvus_pb2.BoolReply: - return resp_class(status=status, bool_reply=False) - - if resp_class == milvus_pb2.VectorIds: - return resp_class(status=status, vector_id_array=[]) - - if resp_class == milvus_pb2.TopKQueryResultList: - return resp_class(status=status, topk_query_result=[]) - - if resp_class == milvus_pb2.TableRowCount: - return resp_class(status=status, table_row_count=-1) - - if resp_class == milvus_pb2.TableName: - return resp_class(status=status, table_name=[]) - - if resp_class == milvus_pb2.StringReply: - return resp_class(status=status, string_reply='') - - if resp_class == milvus_pb2.TableSchema: - return milvus_pb2.TableSchema( - status=status - ) - - if resp_class == milvus_pb2.IndexParam: - return milvus_pb2.IndexParam( - table_name=milvus_pb2.TableName( - status=status - ) - ) - - status.error_code = status_pb2.UNEXPECTED_ERROR - return status - - -@server.errorhandler(exceptions.TableNotFoundError) -def TableNotFoundErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) - - -@server.errorhandler(exceptions.InvalidTopKError) -def InvalidTopKErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.ILLEGAL_TOPK) - - -@server.errorhandler(exceptions.InvalidArgumentError) -def InvalidArgumentErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) - - -@server.errorhandler(exceptions.DBError) -def DBErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.UNEXPECTED_ERROR) - - -@server.errorhandler(exceptions.InvalidRangeError) -def InvalidRangeErrorHandler(err): - logger.error(err) - return resp_handler(err, status_pb2.ILLEGAL_RANGE) diff --git a/mishards/exceptions.py b/mishards/exceptions.py deleted file mode 100644 index 72839f88d2..0000000000 --- a/mishards/exceptions.py +++ /dev/null @@ -1,38 +0,0 @@ -import mishards.exception_codes as codes - - -class BaseException(Exception): - code = codes.INVALID_CODE - message = 'BaseException' - - def __init__(self, message='', metadata=None): - self.message = self.__class__.__name__ if not message else message - self.metadata = metadata - - -class ConnectionConnectError(BaseException): - code = codes.CONNECT_ERROR_CODE - - -class ConnectionNotFoundError(BaseException): - code = codes.CONNECTION_NOT_FOUND_CODE - - -class DBError(BaseException): - code = codes.DB_ERROR_CODE - - -class TableNotFoundError(BaseException): - code = codes.TABLE_NOT_FOUND_CODE - - -class InvalidTopKError(BaseException): - code = codes.INVALID_TOPK_CODE - - -class InvalidArgumentError(BaseException): - code = codes.INVALID_ARGUMENT_CODE - - -class InvalidRangeError(BaseException): - code = codes.INVALID_DATE_RANGE_CODE diff --git a/mishards/factories.py b/mishards/factories.py deleted file mode 100644 index 52c0253b39..0000000000 --- a/mishards/factories.py +++ /dev/null @@ -1,54 +0,0 @@ -import time -import datetime -import random -import factory -from factory.alchemy import SQLAlchemyModelFactory -from faker import Faker -from faker.providers import BaseProvider - -from milvus.client.types import MetricType -from mishards import db -from mishards.models import Tables, TableFiles - - -class FakerProvider(BaseProvider): - def this_date(self): - t = datetime.datetime.today() - return (t.year - 1900) * 10000 + (t.month -
1) * 100 + t.day - - -factory.Faker.add_provider(FakerProvider) - - -class TablesFactory(SQLAlchemyModelFactory): - class Meta: - model = Tables - sqlalchemy_session = db.session_factory - sqlalchemy_session_persistence = 'commit' - - id = factory.Faker('random_number', digits=16, fix_len=True) - table_id = factory.Faker('uuid4') - state = factory.Faker('random_element', elements=(0, 1)) - dimension = factory.Faker('random_element', elements=(256, 512)) - created_on = int(time.time()) - index_file_size = 0 - engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) - metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP)) - nlist = 16384 - - -class TableFilesFactory(SQLAlchemyModelFactory): - class Meta: - model = TableFiles - sqlalchemy_session = db.session_factory - sqlalchemy_session_persistence = 'commit' - - id = factory.Faker('random_number', digits=16, fix_len=True) - table = factory.SubFactory(TablesFactory) - engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3)) - file_id = factory.Faker('uuid4') - file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4)) - file_size = factory.Faker('random_number') - updated_time = int(time.time()) - created_on = int(time.time()) - date = factory.Faker('this_date') diff --git a/mishards/grpc_utils/__init__.py b/mishards/grpc_utils/__init__.py deleted file mode 100644 index f5225b2a66..0000000000 --- a/mishards/grpc_utils/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -from grpc_opentracing import SpanDecorator -from milvus.grpc_gen import status_pb2 - - -class GrpcSpanDecorator(SpanDecorator): - def __call__(self, span, rpc_info): - status = None - if not rpc_info.response: - return - if isinstance(rpc_info.response, status_pb2.Status): - status = rpc_info.response - else: - try: - status = rpc_info.response.status - except Exception as e: - status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR, - reason='Should not happen') - - if status.error_code == 0: - return - error_log = {'event': 'error', - 'request': rpc_info.request, - 'response': rpc_info.response - } - span.set_tag('error', True) - span.log_kv(error_log) - - -def mark_grpc_method(func): - setattr(func, 'grpc_method', True) - return func - - -def is_grpc_method(func): - if not func: - return False - return getattr(func, 'grpc_method', False) diff --git a/mishards/grpc_utils/grpc_args_parser.py b/mishards/grpc_utils/grpc_args_parser.py deleted file mode 100644 index 039299803d..0000000000 --- a/mishards/grpc_utils/grpc_args_parser.py +++ /dev/null @@ -1,102 +0,0 @@ -from milvus import Status -from functools import wraps - - -def error_status(func): - @wraps(func) - def inner(*args, **kwargs): - try: - results = func(*args, **kwargs) - except Exception as e: - return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None - - return Status(code=0, message="Success"), results - - return inner - - -class GrpcArgsParser(object): - - @classmethod - @error_status - def parse_proto_TableSchema(cls, param): - _table_schema = { - 'status': param.status, - 'table_name': param.table_name, - 'dimension': param.dimension, - 'index_file_size': param.index_file_size, - 'metric_type': param.metric_type - } - - return _table_schema - - @classmethod - @error_status - def parse_proto_TableName(cls, param): - return param.table_name - - @classmethod - @error_status - def parse_proto_Index(cls, param): - _index = { - 'index_type': param.index_type, - 'nlist': param.nlist - } - - return _index - - @classmethod - @error_status - 
def parse_proto_IndexParam(cls, param): - _table_name = param.table_name - _status, _index = cls.parse_proto_Index(param.index) - - if not _status.OK(): - raise Exception("Argument parse error") - - return _table_name, _index - - @classmethod - @error_status - def parse_proto_Command(cls, param): - _cmd = param.cmd - - return _cmd - - @classmethod - @error_status - def parse_proto_Range(cls, param): - _start_value = param.start_value - _end_value = param.end_value - - return _start_value, _end_value - - @classmethod - @error_status - def parse_proto_RowRecord(cls, param): - return list(param.vector_data) - - @classmethod - @error_status - def parse_proto_SearchParam(cls, param): - _table_name = param.table_name - _topk = param.topk - _nprobe = param.nprobe - _status, _range = cls.parse_proto_Range(param.query_range_array) - - if not _status.OK(): - raise Exception("Argument parse error") - - _row_record = param.query_record_array - - return _table_name, _row_record, _range, _topk - - @classmethod - @error_status - def parse_proto_DeleteByRangeParam(cls, param): - _table_name = param.table_name - _range = param.range - _start_value = _range.start_value - _end_value = _range.end_value - - return _table_name, _start_value, _end_value diff --git a/mishards/grpc_utils/grpc_args_wrapper.py b/mishards/grpc_utils/grpc_args_wrapper.py deleted file mode 100644 index 7447dbd995..0000000000 --- a/mishards/grpc_utils/grpc_args_wrapper.py +++ /dev/null @@ -1,4 +0,0 @@ -# class GrpcArgsWrapper(object): - -# @classmethod -# def proto_TableName(cls): diff --git a/mishards/grpc_utils/test_grpc.py b/mishards/grpc_utils/test_grpc.py deleted file mode 100644 index 9af09e5d0d..0000000000 --- a/mishards/grpc_utils/test_grpc.py +++ /dev/null @@ -1,75 +0,0 @@ -import logging -import opentracing -from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method -from milvus.grpc_gen import status_pb2, milvus_pb2 - -logger = logging.getLogger(__name__) - - -class FakeTracer(opentracing.Tracer): - pass - - -class FakeSpan(opentracing.Span): - def __init__(self, context, tracer, **kwargs): - super(FakeSpan, self).__init__(tracer, context) - self.reset() - - def set_tag(self, key, value): - self.tags.append({key: value}) - - def log_kv(self, key_values, timestamp=None): - self.logs.append(key_values) - - def reset(self): - self.tags = [] - self.logs = [] - - -class FakeRpcInfo: - def __init__(self, request, response): - self.request = request - self.response = response - - -class TestGrpcUtils: - def test_span_deco(self): - request = 'request' - OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success') - response = OK - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 0 - assert len(span.tags) == 0 - - response = milvus_pb2.BoolReply(status=OK, bool_reply=False) - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 0 - assert len(span.tags) == 0 - - response = 1 - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 1 - assert len(span.tags) == 1 - - response = 0 - rpc_info = FakeRpcInfo(request=request, response=response) - span = FakeSpan(context=None, 
tracer=FakeTracer()) - span_deco = GrpcSpanDecorator() - span_deco(span, rpc_info) - assert len(span.logs) == 0 - assert len(span.tags) == 0 - - def test_is_grpc_method(self): - target = 1 - assert not is_grpc_method(target) - target = None - assert not is_grpc_method(target) diff --git a/mishards/hash_ring.py b/mishards/hash_ring.py deleted file mode 100644 index a97f3f580e..0000000000 --- a/mishards/hash_ring.py +++ /dev/null @@ -1,150 +0,0 @@ -import math -import sys -from bisect import bisect - -if sys.version_info >= (2, 5): - import hashlib - md5_constructor = hashlib.md5 -else: - import md5 - md5_constructor = md5.new - - -class HashRing(object): - def __init__(self, nodes=None, weights=None): - """`nodes` is a list of objects that have a proper __str__ representation. - `weights` is a dictionary that sets weights to the nodes. The default - weight is that all nodes are equal. - """ - self.ring = dict() - self._sorted_keys = [] - - self.nodes = nodes - - if not weights: - weights = {} - self.weights = weights - - self._generate_circle() - - def _generate_circle(self): - """Generates the circle. - """ - total_weight = 0 - for node in self.nodes: - total_weight += self.weights.get(node, 1) - - for node in self.nodes: - weight = 1 - - if node in self.weights: - weight = self.weights.get(node) - - factor = math.floor((40 * len(self.nodes) * weight) / total_weight) - - for j in range(0, int(factor)): - b_key = self._hash_digest('%s-%s' % (node, j)) - - for i in range(0, 3): - key = self._hash_val(b_key, lambda x: x + i * 4) - self.ring[key] = node - self._sorted_keys.append(key) - - self._sorted_keys.sort() - - def get_node(self, string_key): - """Given a string key a corresponding node in the hash ring is returned. - - If the hash ring is empty, `None` is returned. - """ - pos = self.get_node_pos(string_key) - if pos is None: - return None - return self.ring[self._sorted_keys[pos]] - - def get_node_pos(self, string_key): - """Given a string key a corresponding node in the hash ring is returned - along with its position in the ring. - - If the hash ring is empty, (`None`, `None`) is returned. - """ - if not self.ring: - return None - - key = self.gen_key(string_key) - - nodes = self._sorted_keys - pos = bisect(nodes, key) - - if pos == len(nodes): - return 0 - else: - return pos - - def iterate_nodes(self, string_key, distinct=True): - """Given a string key it returns the nodes as a generator that can hold the key. - - The generator iterates one time through the ring - starting at the correct position. - - if `distinct` is set, then the nodes returned will be unique, - i.e. no virtual copies will be returned. - """ - if not self.ring: - yield None, None - - returned_values = set() - - def distinct_filter(value): - if str(value) not in returned_values: - returned_values.add(str(value)) - return value - - pos = self.get_node_pos(string_key) - for key in self._sorted_keys[pos:]: - val = distinct_filter(self.ring[key]) - if val: - yield val - - for i, key in enumerate(self._sorted_keys): - if i < pos: - val = distinct_filter(self.ring[key]) - if val: - yield val - - def gen_key(self, key): - """Given a string key it returns a long value, - this long value represents a place on the hash ring. - - md5 is currently used because it mixes well.
- """ - b_key = self._hash_digest(key) - return self._hash_val(b_key, lambda x: x) - - def _hash_val(self, b_key, entry_fn): - return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( - b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] - - def _hash_digest(self, key): - m = md5_constructor() - key = key.encode() - m.update(key) - return m.digest() - - -if __name__ == '__main__': - from collections import defaultdict - servers = [ - '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', - '192.168.0.249:11212' - ] - - ring = HashRing(servers) - keys = ['{}'.format(i) for i in range(100)] - mapped = defaultdict(list) - for k in keys: - server = ring.get_node(k) - mapped[server].append(k) - - for k, v in mapped.items(): - print(k, v) diff --git a/mishards/main.py b/mishards/main.py deleted file mode 100644 index c0d142607b..0000000000 --- a/mishards/main.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import sys -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -from mishards import (settings, create_app) - - -def main(): - server = create_app(settings.DefaultConfig) - server.run(port=settings.SERVER_PORT) - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/mishards/models.py b/mishards/models.py deleted file mode 100644 index 4b6c8f9ef4..0000000000 --- a/mishards/models.py +++ /dev/null @@ -1,76 +0,0 @@ -import logging -from sqlalchemy import (Integer, Boolean, Text, - String, BigInteger, and_, or_, - Column) -from sqlalchemy.orm import relationship, backref - -from mishards import db - -logger = logging.getLogger(__name__) - - -class TableFiles(db.Model): - FILE_TYPE_NEW = 0 - FILE_TYPE_RAW = 1 - FILE_TYPE_TO_INDEX = 2 - FILE_TYPE_INDEX = 3 - FILE_TYPE_TO_DELETE = 4 - FILE_TYPE_NEW_MERGE = 5 - FILE_TYPE_NEW_INDEX = 6 - FILE_TYPE_BACKUP = 7 - - __tablename__ = 'TableFiles' - - id = Column(BigInteger, primary_key=True, autoincrement=True) - table_id = Column(String(50)) - engine_type = Column(Integer) - file_id = Column(String(50)) - file_type = Column(Integer) - file_size = Column(Integer, default=0) - row_count = Column(Integer, default=0) - updated_time = Column(BigInteger) - created_on = Column(BigInteger) - date = Column(Integer) - - table = relationship( - 'Tables', - primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', - backref=backref('files', uselist=True, lazy='dynamic') - ) - - -class Tables(db.Model): - TO_DELETE = 1 - NORMAL = 0 - - __tablename__ = 'Tables' - - id = Column(BigInteger, primary_key=True, autoincrement=True) - table_id = Column(String(50), unique=True) - state = Column(Integer) - dimension = Column(Integer) - created_on = Column(Integer) - flag = Column(Integer, default=0) - index_file_size = Column(Integer) - engine_type = Column(Integer) - nlist = Column(Integer) - metric_type = Column(Integer) - - def files_to_search(self, date_range=None): - cond = or_( - TableFiles.file_type == TableFiles.FILE_TYPE_RAW, - TableFiles.file_type == TableFiles.FILE_TYPE_TO_INDEX, - TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, - ) - if date_range: - cond = and_( - cond, - or_( - and_(TableFiles.date >= d[0], TableFiles.date < d[1]) for d in date_range - ) - ) - - files = self.files.filter(cond) - - logger.debug('DATE_RANGE: {}'.format(date_range)) - return files diff --git a/mishards/routings.py b/mishards/routings.py deleted file mode 100644 index 823972726f..0000000000 --- a/mishards/routings.py +++ /dev/null @@ -1,96 +0,0 @@ -import logging -from sqlalchemy import exc as sqlalchemy_exc 
-from sqlalchemy import and_ - -from mishards import exceptions, db -from mishards.hash_ring import HashRing -from mishards.models import Tables - -logger = logging.getLogger(__name__) - - -class RouteManager: - ROUTER_CLASSES = {} - - @classmethod - def register_router_class(cls, target): - name = target.__dict__.get('NAME', None) - name = name if name else target.__class__.__name__ - cls.ROUTER_CLASSES[name] = target - return target - - @classmethod - def get_router_class(cls, name): - return cls.ROUTER_CLASSES.get(name, None) - - -class RouterFactory: - @classmethod - def new_router(cls, name, conn_mgr, **kwargs): - router_class = RouteManager.get_router_class(name) - assert router_class - return router_class(conn_mgr, **kwargs) - - -class RouterMixin: - def __init__(self, conn_mgr): - self.conn_mgr = conn_mgr - - def routing(self, table_name, metadata=None, **kwargs): - raise NotImplementedError() - - def connection(self, metadata=None): - conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) - if conn: - conn.on_connect(metadata=metadata) - return conn.conn - - def query_conn(self, name, metadata=None): - conn = self.conn_mgr.conn(name, metadata=metadata) - if not conn: - raise exceptions.ConnectionNotFoundError(name, metadata=metadata) - conn.on_connect(metadata=metadata) - return conn.conn - - -@RouteManager.register_router_class -class FileBasedHashRingRouter(RouterMixin): - NAME = 'FileBasedHashRingRouter' - - def __init__(self, conn_mgr, **kwargs): - super(FileBasedHashRingRouter, self).__init__(conn_mgr) - - def routing(self, table_name, metadata=None, **kwargs): - range_array = kwargs.pop('range_array', None) - return self._route(table_name, range_array, metadata, **kwargs) - - def _route(self, table_name, range_array, metadata=None, **kwargs): - # PXU TODO: Implement Thread-local Context - # PXU TODO: Session life mgt - try: - table = db.Session.query(Tables).filter( - and_(Tables.table_id == table_name, - Tables.state != Tables.TO_DELETE)).first() - except sqlalchemy_exc.SQLAlchemyError as e: - raise exceptions.DBError(message=str(e), metadata=metadata) - - if not table: - raise exceptions.TableNotFoundError(table_name, metadata=metadata) - files = table.files_to_search(range_array) - db.remove_session() - - servers = self.conn_mgr.conn_names - logger.info('Available servers: {}'.format(servers)) - - ring = HashRing(servers) - - routing = {} - - for f in files: - target_host = ring.get_node(str(f.id)) - sub = routing.get(target_host, None) - if not sub: - routing[target_host] = {'table_id': table_name, 'file_ids': []} - routing[target_host]['file_ids'].append(str(f.id)) - - return routing diff --git a/mishards/server.py b/mishards/server.py deleted file mode 100644 index 599a00e455..0000000000 --- a/mishards/server.py +++ /dev/null @@ -1,122 +0,0 @@ -import logging -import grpc -import time -import socket -import inspect -from urllib.parse import urlparse -from functools import wraps -from concurrent import futures -from grpc._cython import cygrpc -from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server -from mishards.grpc_utils import is_grpc_method -from mishards.service_handler import ServiceHandler -from mishards import settings - -logger = logging.getLogger(__name__) - - -class Server: - def __init__(self): - self.pre_run_handlers = set() - self.grpc_methods = set() - self.error_handlers = {} - self.exit_flag = False - - def init_app(self, - conn_mgr, - tracer, - router, - discover, - port=19530, - max_workers=10, - **kwargs): - self.port =
int(port) - self.conn_mgr = conn_mgr - self.tracer = tracer - self.router = router - self.discover = discover - - self.server_impl = grpc.server( - thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers), - options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), - (cygrpc.ChannelArgKey.max_receive_message_length, -1)]) - - self.server_impl = self.tracer.decorate(self.server_impl) - - self.register_pre_run_handler(self.pre_run_handler) - - def pre_run_handler(self): - woserver = settings.WOSERVER - url = urlparse(woserver) - ip = socket.gethostbyname(url.hostname) - socket.inet_pton(socket.AF_INET, ip) - self.conn_mgr.register( - 'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80)) - - def register_pre_run_handler(self, func): - logger.info('Registering {} into server pre_run_handlers'.format(func)) - self.pre_run_handlers.add(func) - return func - - def wrap_method_with_errorhandler(self, func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - if e.__class__ in self.error_handlers: - return self.error_handlers[e.__class__](e) - raise - - return wrapper - - def errorhandler(self, exception): - if inspect.isclass(exception) and issubclass(exception, Exception): - - def wrapper(func): - self.error_handlers[exception] = func - return func - - return wrapper - return exception - - def on_pre_run(self): - for handler in self.pre_run_handlers: - handler() - self.discover.start() - - def start(self, port=None): - handler_class = self.decorate_handler(ServiceHandler) - add_MilvusServiceServicer_to_server( - handler_class(tracer=self.tracer, - router=self.router), self.server_impl) - self.server_impl.add_insecure_port("[::]:{}".format( - str(port or self.port))) - self.server_impl.start() - - def run(self, port): - logger.info('Milvus server start ......') - port = port or self.port - self.on_pre_run() - - self.start(port) - logger.info('Listening on port {}'.format(port)) - - try: - while not self.exit_flag: - time.sleep(5) - except KeyboardInterrupt: - self.stop() - - def stop(self): - logger.info('Server is shutting down ......') - self.exit_flag = True - self.server_impl.stop(0) - self.tracer.close() - logger.info('Server is closed') - - def decorate_handler(self, handler): - for key, attr in handler.__dict__.items(): - if is_grpc_method(attr): - setattr(handler, key, self.wrap_method_with_errorhandler(attr)) - return handler diff --git a/mishards/service_handler.py b/mishards/service_handler.py deleted file mode 100644 index 5e91c14f14..0000000000 --- a/mishards/service_handler.py +++ /dev/null @@ -1,475 +0,0 @@ -import logging -import time -import datetime -from collections import defaultdict - -import multiprocessing -from concurrent.futures import ThreadPoolExecutor -from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 -from milvus.grpc_gen.milvus_pb2 import TopKQueryResult -from milvus.client.abstract import Range -from milvus.client import types as Types - -from mishards import (db, settings, exceptions) -from mishards.grpc_utils import mark_grpc_method -from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser -from mishards import utilities - -logger = logging.getLogger(__name__) - - -class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer): - MAX_NPROBE = 2048 - MAX_TOPK = 2048 - - def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs): - self.table_meta = {} - self.error_handlers = {} - self.tracer = tracer - self.router = router -
self.max_workers = max_workers - - def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs): - status = status_pb2.Status(error_code=status_pb2.SUCCESS, - reason="Success") - if not files_n_topk_results: - return status, [] - - request_results = defaultdict(list) - - calc_time = time.time() - for files_collection in files_n_topk_results: - if isinstance(files_collection, tuple): - status, _ = files_collection - return status, [] - for request_pos, each_request_results in enumerate( - files_collection.topk_query_result): - request_results[request_pos].extend( - each_request_results.query_result_arrays) - request_results[request_pos] = sorted( - request_results[request_pos], - key=lambda x: x.distance, - reverse=reverse)[:topk] - - calc_time = time.time() - calc_time - logger.info('Merge takes {}'.format(calc_time)) - - results = sorted(request_results.items()) - topk_query_result = [] - - for result in results: - query_result = TopKQueryResult(query_result_arrays=result[1]) - topk_query_result.append(query_result) - - return status, topk_query_result - - def _do_query(self, - context, - table_id, - table_meta, - vectors, - topk, - nprobe, - range_array=None, - **kwargs): - metadata = kwargs.get('metadata', None) - range_array = [ - utilities.range_to_date(r, metadata=metadata) for r in range_array - ] if range_array else None - - routing = {} - p_span = None if self.tracer.empty else context.get_active_span( - ).context - with self.tracer.start_span('get_routing', child_of=p_span): - routing = self.router.routing(table_id, - range_array=range_array, - metadata=metadata) - logger.info('Routing: {}'.format(routing)) - - metadata = kwargs.get('metadata', None) - - rs = [] - all_topk_results = [] - - def search(addr, query_params, vectors, topk, nprobe, **kwargs): - logger.info( - 'Send Search Request: addr={};params={};nq={};topk={};nprobe={}' - .format(addr, query_params, len(vectors), topk, nprobe)) - - conn = self.router.query_conn(addr, metadata=metadata) - start = time.time() - span = kwargs.get('span', None) - span = span if span else (None if self.tracer.empty else - context.get_active_span().context) - - with self.tracer.start_span('search_{}'.format(addr), - child_of=span): - ret = conn.search_vectors_in_files( - table_name=query_params['table_id'], - file_ids=query_params['file_ids'], - query_records=vectors, - top_k=topk, - nprobe=nprobe, - lazy_=True) - end = time.time() - logger.info('search_vectors_in_files takes: {}'.format(end - start)) - - all_topk_results.append(ret) - - with self.tracer.start_span('do_search', child_of=p_span) as span: - with ThreadPoolExecutor(max_workers=self.max_workers) as pool: - for addr, params in routing.items(): - res = pool.submit(search, - addr, - params, - vectors, - topk, - nprobe, - span=span) - rs.append(res) - - for res in rs: - res.result() - - reverse = table_meta.metric_type == Types.MetricType.IP - with self.tracer.start_span('do_merge', child_of=p_span): - return self._do_merge(all_topk_results, - topk, - reverse=reverse, - metadata=metadata) - - def _create_table(self, table_schema): - return self.router.connection().create_table(table_schema) - - @mark_grpc_method - def CreateTable(self, request, context): - _status, _table_schema = Parser.parse_proto_TableSchema(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('CreateTable {}'.format(_table_schema['table_name'])) - - _status = self._create_table(_table_schema) - - return 
status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _has_table(self, table_name, metadata=None): - return self.router.connection(metadata=metadata).has_table(table_name) - - @mark_grpc_method - def HasTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return milvus_pb2.BoolReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - bool_reply=False) - - logger.info('HasTable {}'.format(_table_name)) - - _status, _bool = self._has_table(_table_name, - metadata={'resp_class': milvus_pb2.BoolReply}) - - return milvus_pb2.BoolReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - bool_reply=_bool) - - def _delete_table(self, table_name): - return self.router.connection().delete_table(table_name) - - @mark_grpc_method - def DropTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('DropTable {}'.format(_table_name)) - - _status = self._delete_table(_table_name) - - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _create_index(self, table_name, index): - return self.router.connection().create_index(table_name, index) - - @mark_grpc_method - def CreateIndex(self, request, context): - _status, unpacks = Parser.parse_proto_IndexParam(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - _table_name, _index = unpacks - - logger.info('CreateIndex {}'.format(_table_name)) - - # TODO: interface create_index incomplete - _status = self._create_index(_table_name, _index) - - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _add_vectors(self, param, metadata=None): - return self.router.connection(metadata=metadata).add_vectors( - None, None, insert_param=param) - - @mark_grpc_method - def Insert(self, request, context): - logger.info('Insert') - # TODO: The SDK interface add_vectors() could be updated to add a key 'row_id_array' - _status, _ids = self._add_vectors( - metadata={'resp_class': milvus_pb2.VectorIds}, param=request) - return milvus_pb2.VectorIds(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - vector_id_array=_ids) - - @mark_grpc_method - def Search(self, request, context): - - table_name = request.table_name - - topk = request.topk - nprobe = request.nprobe - - logger.info('Search {}: topk={} nprobe={}'.format( - table_name, topk, nprobe)) - - metadata = {'resp_class': milvus_pb2.TopKQueryResultList} - - if nprobe > self.MAX_NPROBE or nprobe <= 0: - raise exceptions.InvalidArgumentError( - message='Invalid nprobe: {}'.format(nprobe), metadata=metadata) - - if topk > self.MAX_TOPK or topk <= 0: - raise exceptions.InvalidTopKError( - message='Invalid topk: {}'.format(topk), metadata=metadata) - - table_meta = self.table_meta.get(table_name, None) - - if not table_meta: - status, info = self.router.connection( - metadata=metadata).describe_table(table_name) - if not status.OK(): - raise exceptions.TableNotFoundError(table_name, - metadata=metadata) - - self.table_meta[table_name] = info - table_meta = info - - start = time.time() - - query_record_array = [] - - for query_record in request.query_record_array: - query_record_array.append(list(query_record.vector_data)) - - query_range_array = [] - for query_range in
request.query_range_array: - query_range_array.append( - Range(query_range.start_value, query_range.end_value)) - - status, results = self._do_query(context, - table_name, - table_meta, - query_record_array, - topk, - nprobe, - query_range_array, - metadata=metadata) - - now = time.time() - logger.info('SearchVector takes: {}'.format(now - start)) - - topk_result_list = milvus_pb2.TopKQueryResultList( - status=status_pb2.Status(error_code=status.error_code, - reason=status.reason), - topk_query_result=results) - return topk_result_list - - @mark_grpc_method - def SearchInFiles(self, request, context): - raise NotImplementedError() - - def _describe_table(self, table_name, metadata=None): - return self.router.connection(metadata=metadata).describe_table(table_name) - - @mark_grpc_method - def DescribeTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return milvus_pb2.TableSchema(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), ) - - metadata = {'resp_class': milvus_pb2.TableSchema} - - logger.info('DescribeTable {}'.format(_table_name)) - _status, _table = self._describe_table(metadata=metadata, - table_name=_table_name) - - if _status.OK(): - return milvus_pb2.TableSchema( - table_name=_table_name, - index_file_size=_table.index_file_size, - dimension=_table.dimension, - metric_type=_table.metric_type, - status=status_pb2.Status(error_code=_status.code, - reason=_status.message), - ) - - return milvus_pb2.TableSchema( - table_name=_table_name, - status=status_pb2.Status(error_code=_status.code, - reason=_status.message), - ) - - def _count_table(self, table_name, metadata=None): - return self.router.connection( - metadata=metadata).get_table_row_count(table_name) - - @mark_grpc_method - def CountTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - status = status_pb2.Status(error_code=_status.code, - reason=_status.message) - - return milvus_pb2.TableRowCount(status=status) - - logger.info('CountTable {}'.format(_table_name)) - - metadata = {'resp_class': milvus_pb2.TableRowCount} - _status, _count = self._count_table(_table_name, metadata=metadata) - - return milvus_pb2.TableRowCount( - status=status_pb2.Status(error_code=_status.code, - reason=_status.message), - table_row_count=_count if isinstance(_count, int) else -1) - - def _get_server_version(self, metadata=None): - return self.router.connection(metadata=metadata).server_version() - - @mark_grpc_method - def Cmd(self, request, context): - _status, _cmd = Parser.parse_proto_Command(request) - logger.info('Cmd: {}'.format(_cmd)) - - if not _status.OK(): - return milvus_pb2.StringReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message)) - - metadata = {'resp_class': milvus_pb2.StringReply} - - if _cmd == 'version': - _status, _reply = self._get_server_version(metadata=metadata) - else: - _status, _reply = self.router.connection( - metadata=metadata).server_status() - - return milvus_pb2.StringReply(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - string_reply=_reply) - - def _show_tables(self, metadata=None): - return self.router.connection(metadata=metadata).show_tables() - - @mark_grpc_method - def ShowTables(self, request, context): - logger.info('ShowTables') - metadata = {'resp_class': milvus_pb2.TableName} - _status, _results = self._show_tables(metadata=metadata) - - return
milvus_pb2.TableNameList(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - table_names=_results) - - def _delete_by_range(self, table_name, start_date, end_date): - return self.router.connection().delete_vectors_by_range(table_name, - start_date, - end_date) - - @mark_grpc_method - def DeleteByRange(self, request, context): - _status, unpacks = \ - Parser.parse_proto_DeleteByRangeParam(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - _table_name, _start_date, _end_date = unpacks - - logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date, - _end_date)) - _status = self._delete_by_range(_table_name, _start_date, _end_date) - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _preload_table(self, table_name): - return self.router.connection().preload_table(table_name) - - @mark_grpc_method - def PreloadTable(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('PreloadTable {}'.format(_table_name)) - _status = self._preload_table(_table_name) - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - def _describe_index(self, table_name, metadata=None): - return self.router.connection(metadata=metadata).describe_index(table_name) - - @mark_grpc_method - def DescribeIndex(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return milvus_pb2.IndexParam(status=status_pb2.Status( - error_code=_status.code, reason=_status.message)) - - metadata = {'resp_class': milvus_pb2.IndexParam} - - logger.info('DescribeIndex {}'.format(_table_name)) - _status, _index_param = self._describe_index(table_name=_table_name, - metadata=metadata) - - if not _index_param: - return milvus_pb2.IndexParam(status=status_pb2.Status( - error_code=_status.code, reason=_status.message)) - - _index = milvus_pb2.Index(index_type=_index_param._index_type, - nlist=_index_param._nlist) - - return milvus_pb2.IndexParam(status=status_pb2.Status( - error_code=_status.code, reason=_status.message), - table_name=_table_name, - index=_index) - - def _drop_index(self, table_name): - return self.router.connection().drop_index(table_name) - - @mark_grpc_method - def DropIndex(self, request, context): - _status, _table_name = Parser.parse_proto_TableName(request) - - if not _status.OK(): - return status_pb2.Status(error_code=_status.code, - reason=_status.message) - - logger.info('DropIndex {}'.format(_table_name)) - _status = self._drop_index(_table_name) - return status_pb2.Status(error_code=_status.code, - reason=_status.message) diff --git a/mishards/settings.py b/mishards/settings.py deleted file mode 100644 index 21a3bb7a65..0000000000 --- a/mishards/settings.py +++ /dev/null @@ -1,94 +0,0 @@ -import sys -import os - -from environs import Env -env = Env() - -FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False) -if FROM_EXAMPLE: - from dotenv import load_dotenv - load_dotenv('./mishards/.env.example') -else: - env.read_env() - -DEBUG = env.bool('DEBUG', False) - -LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') -LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') -LOG_NAME = env.str('LOG_NAME', 'logfile') -TIMEZONE = env.str('TIMEZONE', 'UTC') - -from utils.logger_helper import config -config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) - -TIMEOUT = 
env.int('TIMEOUT', 60) -MAX_RETRY = env.int('MAX_RETRY', 3) - -SERVER_PORT = env.int('SERVER_PORT', 19530) -SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) -WOSERVER = env.str('WOSERVER') - -SD_PROVIDER_SETTINGS = None -SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes') -if SD_PROVIDER == 'Kubernetes': - from sd.kubernetes_provider import KubernetesProviderSettings - SD_PROVIDER_SETTINGS = KubernetesProviderSettings( - namespace=env.str('SD_NAMESPACE', ''), - in_cluster=env.bool('SD_IN_CLUSTER', False), - poll_interval=env.int('SD_POLL_INTERVAL', 5), - pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', ''), - port=env.int('SD_PORT', 19530)) -elif SD_PROVIDER == 'Static': - from sd.static_provider import StaticProviderSettings - SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []), - port=env.int('SD_STATIC_PORT', 19530)) - -# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') - - -class TracingConfig: - TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards') - TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True) - TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False) - TRACING_CONFIG = { - 'sampler': { - 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), - 'param': env.str('TRACING_SAMPLER_PARAM', "1"), - }, - 'local_agent': { - 'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'), - 'reporting_port': env.str('TRACING_REPORTING_PORT', '5775') - }, - 'logging': env.bool('TRACING_LOGGING', True) - } - DEFAULT_TRACING_CONFIG = { - 'sampler': { - 'type': env.str('TRACING_SAMPLER_TYPE', 'const'), - 'param': env.str('TRACING_SAMPLER_PARAM', "0"), - } - } - - -class DefaultConfig: - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') - SQL_ECHO = env.bool('SQL_ECHO', False) - TRACING_TYPE = env.str('TRACING_TYPE', '') - ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') - - -class TestingConfig(DefaultConfig): - SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '') - SQL_ECHO = env.bool('SQL_TEST_ECHO', False) - TRACING_TYPE = env.str('TRACING_TEST_TYPE', '') - ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') - - -if __name__ == '__main__': - import logging - logger = logging.getLogger(__name__) - logger.debug('DEBUG') - logger.info('INFO') - logger.warn('WARN') - logger.error('ERROR') diff --git a/mishards/test_connections.py b/mishards/test_connections.py deleted file mode 100644 index 819d2e03da..0000000000 --- a/mishards/test_connections.py +++ /dev/null @@ -1,101 +0,0 @@ -import logging -import pytest -import mock - -from milvus import Milvus -from mishards.connections import (ConnectionMgr, Connection) -from mishards import exceptions - -logger = logging.getLogger(__name__) - - -@pytest.mark.usefixtures('app') -class TestConnection: - def test_manager(self): - mgr = ConnectionMgr() - - mgr.register('pod1', '111') - mgr.register('pod2', '222') - mgr.register('pod2', '222') - mgr.register('pod2', '2222') - assert len(mgr.conn_names) == 2 - - mgr.unregister('pod1') - assert len(mgr.conn_names) == 1 - - mgr.unregister('pod2') - assert len(mgr.conn_names) == 0 - - mgr.register('WOSERVER', 'xxxx') - assert len(mgr.conn_names) == 0 - - assert not mgr.conn('XXXX', None) - with pytest.raises(exceptions.ConnectionNotFoundError): - mgr.conn('XXXX', None, True) - - mgr.conn('WOSERVER', None) - - def test_connection(self): - class Conn: - def __init__(self, state): - self.state = state - - def 
connect(self, uri): - return self.state - - def connected(self): - return self.state - - FAIL_CONN = Conn(False) - PASS_CONN = Conn(True) - - class Retry: - def __init__(self): - self.times = 0 - - def __call__(self, conn): - self.times += 1 - logger.info('Retrying {}'.format(self.times)) - - class Func(): - def __init__(self): - self.executed = False - - def __call__(self): - self.executed = True - - max_retry = 3 - - RetryObj = Retry() - - c = Connection('client', - uri='xx', - max_retry=max_retry, - on_retry_func=RetryObj) - c.conn = FAIL_CONN - ff = Func() - this_connect = c.connect(func=ff) - with pytest.raises(exceptions.ConnectionConnectError): - this_connect() - assert RetryObj.times == max_retry - assert not ff.executed - RetryObj = Retry() - - c.conn = PASS_CONN - this_connect = c.connect(func=ff) - this_connect() - assert ff.executed - assert RetryObj.times == 0 - - this_connect = c.connect(func=None) - with pytest.raises(TypeError): - this_connect() - - errors = [] - - def error_handler(err): - errors.append(err) - - this_connect = c.connect(func=None, exception_handler=error_handler) - this_connect() - assert len(errors) == 1 diff --git a/mishards/test_models.py b/mishards/test_models.py deleted file mode 100644 index d60b62713e..0000000000 --- a/mishards/test_models.py +++ /dev/null @@ -1,39 +0,0 @@ -import logging -import pytest -from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory -from mishards import db, create_app, settings -from mishards.factories import ( - Tables, TableFiles, - TablesFactory, TableFilesFactory -) - -logger = logging.getLogger(__name__) - - -@pytest.mark.usefixtures('app') -class TestModels: - def test_files_to_search(self): - table = TablesFactory() - new_files_cnt = 5 - to_index_cnt = 10 - raw_cnt = 20 - backup_cnt = 12 - to_delete_cnt = 9 - index_cnt = 8 - new_index_cnt = 6 - new_merge_cnt = 11 - - new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110) - to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110) - raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120) - backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110) - index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110) - new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110) - new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110) - to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110) - assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt - - assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt - assert table.files_to_search([(111, 120)]).count() == 0 - assert table.files_to_search([(111, 121)]).count() == raw_cnt - assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt diff --git a/mishards/test_server.py b/mishards/test_server.py deleted file mode 100644 index efd3912076..0000000000 --- a/mishards/test_server.py +++ /dev/null @@ -1,279 +0,0 @@ -import logging -import pytest -import mock -import datetime -import random -import faker -import inspect 
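The retry test above fixes the contract of Connection.connect: it returns a wrapper that establishes the connection first, retrying up to max_retry times and raising ConnectionConnectError once retries are exhausted, and only then invokes the wrapped callable. A condensed usage sketch of that contract follows; the URI and the do_work payload are placeholders, not part of the patch:

from mishards.connections import Connection

def do_work():
    # Placeholder for any call that needs a live connection.
    print('issue queries here')

c = Connection('client', uri='tcp://127.0.0.1:19530', max_retry=3)
guarded = c.connect(func=do_work)   # builds the wrapper; nothing runs yet
guarded()   # connects first (retrying up to 3 times), then runs do_work()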
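test_models above also pins down the range convention used by files_to_search: each (start, end) filter is inclusive of start and exclusive of end, which is why a raw file on date 120 matches (111, 121) but not (111, 120), and why files on date 110 are picked up by (110, 121). Restated against the fixtures above:

table.files_to_search([(100, 115)])   # date-110 files only: 100 <= 110 < 115
table.files_to_search([(111, 120)])   # empty: 120 is excluded by the open end
table.files_to_search([(111, 121)])   # raw files only: 111 <= 120 < 121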
-from milvus import Milvus -from milvus.client.types import Status, IndexType, MetricType -from milvus.client.abstract import IndexParam, TableSchema -from milvus.grpc_gen import status_pb2, milvus_pb2 -from mishards import db, create_app, settings -from mishards.service_handler import ServiceHandler -from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser -from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables -from mishards.routings import RouterMixin - -logger = logging.getLogger(__name__) - -OK = Status(code=Status.SUCCESS, message='Success') -BAD = Status(code=Status.PERMISSION_DENIED, message='Fail') - - -@pytest.mark.usefixtures('started_app') -class TestServer: - @property - def client(self): - m = Milvus() - m.connect(host='localhost', port=settings.SERVER_TEST_PORT) - return m - - def test_server_start(self, started_app): - assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER - - def test_cmd(self, started_app): - ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK, - '')) - status, _ = self.client.server_version() - assert status.OK() - - Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd')) - status, _ = self.client.server_version() - assert not status.OK() - - def test_drop_index(self, started_app): - table_name = inspect.currentframe().f_code.co_name - ServiceHandler._drop_index = mock.MagicMock(return_value=OK) - status = self.client.drop_index(table_name) - assert status.OK() - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status = self.client.drop_index(table_name) - assert not status.OK() - - def test_describe_index(self, started_app): - table_name = inspect.currentframe().f_code.co_name - index_type = IndexType.FLAT - nlist = 1 - index_param = IndexParam(table_name=table_name, - index_type=index_type, - nlist=nlist) - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._describe_index = mock.MagicMock( - return_value=(OK, index_param)) - status, ret = self.client.describe_index(table_name) - assert status.OK() - assert ret._table_name == index_param._table_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status, _ = self.client.describe_index(table_name) - assert not status.OK() - - def test_preload(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._preload_table = mock.MagicMock(return_value=OK) - status = self.client.preload_table(table_name) - assert status.OK() - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status = self.client.preload_table(table_name) - assert not status.OK() - - @pytest.mark.skip - def test_delete_by_range(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - unpacked = table_name, datetime.datetime.today( - ), datetime.datetime.today() - - Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( - return_value=(OK, unpacked)) - ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK) - status = self.client.delete_vectors_by_range( - *unpacked) - assert status.OK() - - Parser.parse_proto_DeleteByRangeParam = mock.MagicMock( - return_value=(BAD, unpacked)) - status = self.client.delete_vectors_by_range( - *unpacked) - assert not status.OK() - - def test_count_table(self, started_app): - table_name = 
inspect.currentframe().f_code.co_name - count = random.randint(100, 200) - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count)) - status, ret = self.client.get_table_row_count(table_name) - assert status.OK() - assert ret == count - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status, _ = self.client.get_table_row_count(table_name) - assert not status.OK() - - def test_show_tables(self, started_app): - tables = ['t1', 't2'] - ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables)) - status, ret = self.client.show_tables() - assert status.OK() - assert ret == tables - - def test_describe_table(self, started_app): - table_name = inspect.currentframe().f_code.co_name - dimension = 128 - nlist = 1 - table_schema = TableSchema(table_name=table_name, - index_file_size=100, - metric_type=MetricType.L2, - dimension=dimension) - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_schema.table_name)) - ServiceHandler._describe_table = mock.MagicMock( - return_value=(OK, table_schema)) - status, _ = self.client.describe_table(table_name) - assert status.OK() - - ServiceHandler._describe_table = mock.MagicMock( - return_value=(BAD, table_schema)) - status, _ = self.client.describe_table(table_name) - assert not status.OK() - - Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD, - 'cmd')) - status, ret = self.client.describe_table(table_name) - assert not status.OK() - - def test_insert(self, started_app): - table_name = inspect.currentframe().f_code.co_name - vectors = [[random.random() for _ in range(16)] for _ in range(10)] - ids = [random.randint(1000000, 20000000) for _ in range(10)] - ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids)) - status, ret = self.client.add_vectors( - table_name=table_name, records=vectors) - assert status.OK() - assert ids == ret - - def test_create_index(self, started_app): - table_name = inspect.currentframe().f_code.co_name - unpacks = table_name, None - Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK, - unpacks)) - ServiceHandler._create_index = mock.MagicMock(return_value=OK) - status = self.client.create_index(table_name=table_name) - assert status.OK() - - Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD, - None)) - status = self.client.create_index(table_name=table_name) - assert not status.OK() - - def test_drop_table(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._delete_table = mock.MagicMock(return_value=OK) - status = self.client.delete_table(table_name=table_name) - assert status.OK() - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status = self.client.delete_table(table_name=table_name) - assert not status.OK() - - def test_has_table(self, started_app): - table_name = inspect.currentframe().f_code.co_name - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(OK, table_name)) - ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True)) - has = self.client.has_table(table_name=table_name) - assert has - - Parser.parse_proto_TableName = mock.MagicMock( - return_value=(BAD, table_name)) - status, has = self.client.has_table(table_name=table_name) - assert not status.OK() - assert not has - - def test_create_table(self, started_app): - 
table_name = inspect.currentframe().f_code.co_name - dimension = 128 - table_schema = dict(table_name=table_name, - index_file_size=100, - metric_type=MetricType.L2, - dimension=dimension) - - ServiceHandler._create_table = mock.MagicMock(return_value=OK) - status = self.client.create_table(table_schema) - assert status.OK() - - Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD, - None)) - status = self.client.create_table(table_schema) - assert not status.OK() - - def random_data(self, n, dimension): - return [[random.random() for _ in range(dimension)] for _ in range(n)] - - def test_search(self, started_app): - table_name = inspect.currentframe().f_code.co_name - to_index_cnt = random.randint(10, 20) - table = TablesFactory(table_id=table_name, state=Tables.NORMAL) - to_index_files = TableFilesFactory.create_batch( - to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX) - topk = random.randint(5, 10) - nq = random.randint(5, 10) - param = { - 'table_name': table_name, - 'query_records': self.random_data(nq, table.dimension), - 'top_k': topk, - 'nprobe': 2049 - } - - result = [ - milvus_pb2.TopKQueryResult(query_result_arrays=[ - milvus_pb2.QueryResult(id=i, distance=random.random()) - for i in range(topk) - ]) for i in range(nq) - ] - - mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status( - error_code=status_pb2.SUCCESS, reason="Success"), - topk_query_result=result) - - table_schema = TableSchema(table_name=table_name, - index_file_size=table.index_file_size, - metric_type=table.metric_type, - dimension=table.dimension) - - status, _ = self.client.search_vectors(**param) - assert status.code == Status.ILLEGAL_ARGUMENT - - param['nprobe'] = 2048 - RouterMixin.connection = mock.MagicMock(return_value=Milvus()) - RouterMixin.query_conn = mock.MagicMock(return_value=Milvus()) - Milvus.describe_table = mock.MagicMock(return_value=(BAD, - table_schema)) - status, ret = self.client.search_vectors(**param) - assert status.code == Status.TABLE_NOT_EXISTS - - Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema)) - Milvus.search_vectors_in_files = mock.MagicMock( - return_value=mock_results) - - status, ret = self.client.search_vectors(**param) - assert status.OK() - assert len(ret) == nq diff --git a/mishards/utilities.py b/mishards/utilities.py deleted file mode 100644 index 42e982b5f1..0000000000 --- a/mishards/utilities.py +++ /dev/null @@ -1,20 +0,0 @@ -import datetime -from mishards import exceptions - - -def format_date(start, end): - return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day, - (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day) - - -def range_to_date(range_obj, metadata=None): - try: - start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d') - end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d') - assert start < end - except (ValueError, AssertionError): - raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format( - range_obj.start_date, range_obj.end_date), - metadata=metadata) - - return format_date(start, end) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index ae224e92ed..0000000000 --- a/requirements.txt +++ /dev/null @@ -1,36 +0,0 @@ -environs==4.2.0 -factory-boy==2.12.0 -Faker==1.0.7 -fire==0.1.3 -google-auth==1.6.3 -grpcio==1.22.0 -grpcio-tools==1.22.0 -kubernetes==10.0.1 -MarkupSafe==1.1.1 -marshmallow==2.19.5 -pymysql==0.9.3 -protobuf==3.9.1 -py==1.8.0 -pyasn1==0.4.7 -pyasn1-modules==0.2.6 -pylint==2.3.1 
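format_date in mishards/utilities.py (deleted above, re-added under shards/ later in this patch) packs a calendar date into a single integer as (year - 1900) * 10000 + (month - 1) * 100 + day. A quick worked check of that encoding, using an arbitrary example date:

import datetime

def encode(d):
    # Same packing as format_date: 2019-09-17 -> 1190817.
    return (d.year - 1900) * 10000 + (d.month - 1) * 100 + d.day

assert encode(datetime.date(2019, 9, 17)) == 119 * 10000 + 8 * 100 + 17 == 1190817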
-pymilvus-test==0.2.28 -#pymilvus==0.2.0 -pyparsing==2.4.0 -pytest==4.6.3 -pytest-level==0.1.1 -pytest-print==0.1.2 -pytest-repeat==0.8.0 -pytest-timeout==1.3.3 -python-dateutil==2.8.0 -python-dotenv==0.10.3 -pytz==2019.1 -requests==2.22.0 -requests-oauthlib==1.2.0 -rsa==4.0 -six==1.12.0 -SQLAlchemy==1.3.5 -urllib3==1.25.3 -jaeger-client>=3.4.0 -grpcio-opentracing>=1.0 -mock==2.0.0 diff --git a/sd/__init__.py b/sd/__init__.py deleted file mode 100644 index 7943887d0f..0000000000 --- a/sd/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging -import inspect -# from utils import singleton - -logger = logging.getLogger(__name__) - - -class ProviderManager: - PROVIDERS = {} - - @classmethod - def register_service_provider(cls, target): - if inspect.isfunction(target): - cls.PROVIDERS[target.__name__] = target - elif inspect.isclass(target): - name = target.__dict__.get('NAME', None) - name = name if name else target.__class__.__name__ - cls.PROVIDERS[name] = target - else: - assert False, 'Cannot register_service_provider for: {}'.format(target) - return target - - @classmethod - def get_provider(cls, name): - return cls.PROVIDERS.get(name, None) - - -from sd import kubernetes_provider, static_provider diff --git a/sd/kubernetes_provider.py b/sd/kubernetes_provider.py deleted file mode 100644 index eb113db007..0000000000 --- a/sd/kubernetes_provider.py +++ /dev/null @@ -1,331 +0,0 @@ -import os -import sys -if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))) - -import re -import logging -import time -import copy -import threading -import queue -import enum -from kubernetes import client, config, watch - -from utils import singleton -from sd import ProviderManager - -logger = logging.getLogger(__name__) - -INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' - - -class EventType(enum.Enum): - PodHeartBeat = 1 - Watch = 2 - - -class K8SMixin: - def __init__(self, namespace, in_cluster=False, **kwargs): - self.namespace = namespace - self.in_cluster = in_cluster - self.kwargs = kwargs - self.v1 = kwargs.get('v1', None) - if not self.namespace: - self.namespace = open(INCLUSTER_NAMESPACE_PATH).read() - - if not self.v1: - config.load_incluster_config( - ) if self.in_cluster else config.load_kube_config() - self.v1 = client.CoreV1Api() - - -class K8SHeartbeatHandler(threading.Thread, K8SMixin): - def __init__(self, - message_queue, - namespace, - label_selector, - in_cluster=False, - **kwargs): - K8SMixin.__init__(self, - namespace=namespace, - in_cluster=in_cluster, - **kwargs) - threading.Thread.__init__(self) - self.queue = message_queue - self.terminate = False - self.label_selector = label_selector - self.poll_interval = kwargs.get('poll_interval', 5) - - def run(self): - while not self.terminate: - try: - pods = self.v1.list_namespaced_pod( - namespace=self.namespace, - label_selector=self.label_selector) - event_message = {'eType': EventType.PodHeartBeat, 'events': []} - for item in pods.items: - pod = self.v1.read_namespaced_pod(name=item.metadata.name, - namespace=self.namespace) - name = pod.metadata.name - ip = pod.status.pod_ip - phase = pod.status.phase - reason = pod.status.reason - message = pod.status.message - ready = True if phase == 'Running' else False - - pod_event = dict(pod=name, - ip=ip, - ready=ready, - reason=reason, - message=message) - - event_message['events'].append(pod_event) - - self.queue.put(event_message) - - except Exception as exc: - logger.error(exc) - - 
time.sleep(self.poll_interval) - - def stop(self): - self.terminate = True - - -class K8SEventListener(threading.Thread, K8SMixin): - def __init__(self, message_queue, namespace, in_cluster=False, **kwargs): - K8SMixin.__init__(self, - namespace=namespace, - in_cluster=in_cluster, - **kwargs) - threading.Thread.__init__(self) - self.queue = message_queue - self.terminate = False - self.at_start_up = True - self._stop_event = threading.Event() - - def stop(self): - self.terminate = True - self._stop_event.set() - - def run(self): - resource_version = '' - w = watch.Watch() - for event in w.stream(self.v1.list_namespaced_event, - namespace=self.namespace, - field_selector='involvedObject.kind=Pod'): - if self.terminate: - break - - resource_version = int(event['object'].metadata.resource_version) - - info = dict( - eType=EventType.Watch, - pod=event['object'].involved_object.name, - reason=event['object'].reason, - message=event['object'].message, - start_up=self.at_start_up, - ) - self.at_start_up = False - # logger.info('Received event: {}'.format(info)) - self.queue.put(info) - - -class EventHandler(threading.Thread): - def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs): - threading.Thread.__init__(self) - self.mgr = mgr - self.queue = message_queue - self.kwargs = kwargs - self.terminate = False - self.pod_patt = re.compile(pod_patt) - self.namespace = namespace - - def stop(self): - self.terminate = True - - def on_drop(self, event, **kwargs): - pass - - def on_pod_started(self, event, **kwargs): - try_cnt = 3 - pod = None - while try_cnt > 0: - try_cnt -= 1 - try: - pod = self.mgr.v1.read_namespaced_pod(name=event['pod'], - namespace=self.namespace) - if not pod.status.pod_ip: - time.sleep(0.5) - continue - break - except client.rest.ApiException as exc: - time.sleep(0.5) - - if try_cnt <= 0 and not pod: - if not event['start_up']: - logger.error('Pod {} is started but cannot read pod'.format( - event['pod'])) - return - elif try_cnt <= 0 and not pod.status.pod_ip: - logger.warning('NoPodIPFoundError') - return - - logger.info('Register POD {} with IP {}'.format( - pod.metadata.name, pod.status.pod_ip)) - self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip) - - def on_pod_killing(self, event, **kwargs): - logger.info('Unregister POD {}'.format(event['pod'])) - self.mgr.delete_pod(name=event['pod']) - - def on_pod_heartbeat(self, event, **kwargs): - names = self.mgr.conn_mgr.conn_names - - running_names = set() - for each_event in event['events']: - if each_event['ready']: - self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip']) - running_names.add(each_event['pod']) - else: - self.mgr.delete_pod(name=each_event['pod']) - - to_delete = names - running_names - for name in to_delete: - self.mgr.delete_pod(name) - - logger.info(self.mgr.conn_mgr.conn_names) - - def handle_event(self, event): - if event['eType'] == EventType.PodHeartBeat: - return self.on_pod_heartbeat(event) - - if not event or (event['reason'] not in ('Started', 'Killing')): - return self.on_drop(event) - - if not re.match(self.pod_patt, event['pod']): - return self.on_drop(event) - - logger.info('Handling event: {}'.format(event)) - - if event['reason'] == 'Started': - return self.on_pod_started(event) - - return self.on_pod_killing(event) - - def run(self): - while not self.terminate: - try: - event = self.queue.get(timeout=1) - self.handle_event(event) - except queue.Empty: - continue - - -class KubernetesProviderSettings: - def __init__(self, namespace, pod_patt, label_selector, 
in_cluster, - poll_interval, port=None, **kwargs): - self.namespace = namespace - self.pod_patt = pod_patt - self.label_selector = label_selector - self.in_cluster = in_cluster - self.poll_interval = poll_interval - self.port = int(port) if port else 19530 - - -@singleton -@ProviderManager.register_service_provider -class KubernetesProvider(object): - NAME = 'Kubernetes' - - def __init__(self, settings, conn_mgr, **kwargs): - self.namespace = settings.namespace - self.pod_patt = settings.pod_patt - self.label_selector = settings.label_selector - self.in_cluster = settings.in_cluster - self.poll_interval = settings.poll_interval - self.port = settings.port - self.kwargs = kwargs - self.queue = queue.Queue() - - self.conn_mgr = conn_mgr - - if not self.namespace: - self.namespace = open(incluster_namespace_path).read() - - config.load_incluster_config( - ) if self.in_cluster else config.load_kube_config() - self.v1 = client.CoreV1Api() - - self.listener = K8SEventListener(message_queue=self.queue, - namespace=self.namespace, - in_cluster=self.in_cluster, - v1=self.v1, - **kwargs) - - self.pod_heartbeater = K8SHeartbeatHandler( - message_queue=self.queue, - namespace=self.namespace, - label_selector=self.label_selector, - in_cluster=self.in_cluster, - v1=self.v1, - poll_interval=self.poll_interval, - **kwargs) - - self.event_handler = EventHandler(mgr=self, - message_queue=self.queue, - namespace=self.namespace, - pod_patt=self.pod_patt, - **kwargs) - - def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) - - def delete_pod(self, name): - self.conn_mgr.unregister(name) - - def start(self): - self.listener.daemon = True - self.listener.start() - self.event_handler.start() - - self.pod_heartbeater.start() - - def stop(self): - self.listener.stop() - self.pod_heartbeater.stop() - self.event_handler.stop() - - -if __name__ == '__main__': - logging.basicConfig(level=logging.INFO) - - class Connect: - def register(self, name, value): - logger.error('Register: {} - {}'.format(name, value)) - - def unregister(self, name): - logger.error('Unregister: {}'.format(name)) - - @property - def conn_names(self): - return set() - - connect_mgr = Connect() - - settings = KubernetesProviderSettings(namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) - - provider_class = ProviderManager.get_provider('Kubernetes') - t = provider_class(conn_mgr=connect_mgr, settings=settings) - t.start() - cnt = 100 - while cnt > 0: - time.sleep(2) - cnt -= 1 - t.stop() diff --git a/sd/static_provider.py b/sd/static_provider.py deleted file mode 100644 index e88780740f..0000000000 --- a/sd/static_provider.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import sys -if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -import socket -from utils import singleton -from sd import ProviderManager - - -class StaticProviderSettings: - def __init__(self, hosts, port=None): - self.hosts = hosts - self.port = int(port) if port else 19530 - - -@singleton -@ProviderManager.register_service_provider -class KubernetesProvider(object): - NAME = 'Static' - - def __init__(self, settings, conn_mgr, **kwargs): - self.conn_mgr = conn_mgr - self.hosts = [socket.gethostbyname(host) for host in settings.hosts] - self.port = settings.port - - def start(self): - for host in self.hosts: - self.add_pod(host, host) - - def stop(self): - for host in self.hosts: - self.delete_pod(host) - - def 
add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) - - def delete_pod(self, name): - self.conn_mgr.unregister(name) diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 4a88432914..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[tool:pytest] -testpaths = mishards -log_cli=true -log_cli_level=info diff --git a/start_services.yml b/start_services.yml deleted file mode 100644 index 57fe061bb7..0000000000 --- a/start_services.yml +++ /dev/null @@ -1,45 +0,0 @@ -version: "2.3" -services: - milvus: - runtime: nvidia - restart: always - image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de - # ports: - # - "0.0.0.0:19530:19530" - volumes: - - /tmp/milvus/db:/opt/milvus/db - - jaeger: - restart: always - image: jaegertracing/all-in-one:1.14 - ports: - - "0.0.0.0:5775:5775/udp" - - "0.0.0.0:16686:16686" - - "0.0.0.0:9441:9441" - environment: - COLLECTOR_ZIPKIN_HTTP_PORT: 9411 - - mishards: - restart: always - image: registry.zilliz.com/milvus/mishards:v0.0.4 - ports: - - "0.0.0.0:19530:19531" - - "0.0.0.0:19532:19532" - volumes: - - /tmp/milvus/db:/tmp/milvus/db - # - /tmp/mishards_env:/source/mishards/.env - command: ["python", "mishards/main.py"] - environment: - FROM_EXAMPLE: 'true' - DEBUG: 'true' - SERVER_PORT: 19531 - WOSERVER: tcp://milvus:19530 - SD_STATIC_HOSTS: milvus - TRACING_TYPE: jaeger - TRACING_SERVICE_NAME: mishards-demo - TRACING_REPORTING_HOST: jaeger - TRACING_REPORTING_PORT: 5775 - - depends_on: - - milvus - - jaeger diff --git a/tracing/__init__.py b/tracing/__init__.py deleted file mode 100644 index 64a5b50d15..0000000000 --- a/tracing/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -from contextlib import contextmanager - - -def empty_server_interceptor_decorator(target_server, interceptor): - return target_server - - -@contextmanager -def EmptySpan(*args, **kwargs): - yield None - return - - -class Tracer: - def __init__(self, - tracer=None, - interceptor=None, - server_decorator=empty_server_interceptor_decorator): - self.tracer = tracer - self.interceptor = interceptor - self.server_decorator = server_decorator - - def decorate(self, server): - return self.server_decorator(server, self.interceptor) - - @property - def empty(self): - return self.tracer is None - - def close(self): - self.tracer and self.tracer.close() - - def start_span(self, - operation_name=None, - child_of=None, - references=None, - tags=None, - start_time=None, - ignore_active_span=False): - if self.empty: - return EmptySpan() - return self.tracer.start_span(operation_name, child_of, references, - tags, start_time, ignore_active_span) diff --git a/tracing/factory.py b/tracing/factory.py deleted file mode 100644 index 14fcde2eb3..0000000000 --- a/tracing/factory.py +++ /dev/null @@ -1,40 +0,0 @@ -import logging -from jaeger_client import Config -from grpc_opentracing.grpcext import intercept_server -from grpc_opentracing import open_tracing_server_interceptor - -from tracing import (Tracer, empty_server_interceptor_decorator) - -logger = logging.getLogger(__name__) - - -class TracerFactory: - @classmethod - def new_tracer(cls, - tracer_type, - tracer_config, - span_decorator=None, - **kwargs): - if not tracer_type: - return Tracer() - config = tracer_config.TRACING_CONFIG - service_name = tracer_config.TRACING_SERVICE_NAME - validate = tracer_config.TRACING_VALIDATE - # if not tracer_type: - # tracer_type = 'jaeger' - # config = tracer_config.DEFAULT_TRACING_CONFIG - - if tracer_type.lower() == 'jaeger': - config = 
Config(config=config, - service_name=service_name, - validate=validate) - - tracer = config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor( - tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) - - return Tracer(tracer, tracer_interceptor, intercept_server) - - assert False, 'Unsupported tracer type: {}'.format(tracer_type) diff --git a/utils/__init__.py b/utils/__init__.py deleted file mode 100644 index c1d55e76c0..0000000000 --- a/utils/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from functools import wraps - - -def singleton(cls): - instances = {} - @wraps(cls) - def getinstance(*args, **kw): - if cls not in instances: - instances[cls] = cls(*args, **kw) - return instances[cls] - return getinstance diff --git a/utils/logger_helper.py b/utils/logger_helper.py deleted file mode 100644 index b4e3b9c5b6..0000000000 --- a/utils/logger_helper.py +++ /dev/null @@ -1,152 +0,0 @@ -import os -import datetime -from pytz import timezone -from logging import Filter -import logging.config - - -class InfoFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.INFO - - -class DebugFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.DEBUG - - -class WarnFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.WARN - - -class ErrorFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.ERROR - - -class CriticalFilter(logging.Filter): - def filter(self, rec): - return rec.levelno == logging.CRITICAL - - -COLORS = { - 'HEADER': '\033[95m', - 'INFO': '\033[92m', - 'DEBUG': '\033[94m', - 'WARNING': '\033[93m', - 'ERROR': '\033[95m', - 'CRITICAL': '\033[91m', - 'ENDC': '\033[0m', -} - - -class ColorFulFormatColMixin: - def format_col(self, message_str, level_name): - if level_name in COLORS.keys(): - message_str = COLORS.get(level_name) + message_str + COLORS.get( - 'ENDC') - return message_str - - -class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): - def format(self, record): - message_str = super(ColorfulFormatter, self).format(record) - - return self.format_col(message_str, level_name=record.levelname) - - -def config(log_level, log_path, name, tz='UTC'): - def build_log_file(level, log_path, name, tz): - utc_now = datetime.datetime.utcnow() - utc_tz = timezone('UTC') - local_tz = timezone(tz) - tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) - return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), - level) - - if not os.path.exists(log_path): - os.makedirs(log_path) - - LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'default': { - 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', - }, - 'colorful_console': { - 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', - '()': ColorfulFormatter, - }, - }, - 'filters': { - 'InfoFilter': { - '()': InfoFilter, - }, - 'DebugFilter': { - '()': DebugFilter, - }, - 'WarnFilter': { - '()': WarnFilter, - }, - 'ErrorFilter': { - '()': ErrorFilter, - }, - 'CriticalFilter': { - '()': CriticalFilter, - }, - }, - 'handlers': { - 'milvus_celery_console': { - 'class': 'logging.StreamHandler', - 'formatter': 'colorful_console', - }, - 'milvus_debug_file': { - 'level': 'DEBUG', - 'filters': ['DebugFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': 
build_log_file('debug', log_path, name, tz) - }, - 'milvus_info_file': { - 'level': 'INFO', - 'filters': ['InfoFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('info', log_path, name, tz) - }, - 'milvus_warn_file': { - 'level': 'WARN', - 'filters': ['WarnFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('warn', log_path, name, tz) - }, - 'milvus_error_file': { - 'level': 'ERROR', - 'filters': ['ErrorFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('error', log_path, name, tz) - }, - 'milvus_critical_file': { - 'level': 'CRITICAL', - 'filters': ['CriticalFilter'], - 'class': 'logging.handlers.RotatingFileHandler', - 'formatter': 'default', - 'filename': build_log_file('critical', log_path, name, tz) - }, - }, - 'loggers': { - '': { - 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', - 'milvus_error_file', 'milvus_critical_file'], - 'level': log_level, - 'propagate': False - }, - }, - 'propagate': False, - } - - logging.config.dictConfig(LOGGING) From 8553d1c332352d2b044e1f26136369fa71157247 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 21 Oct 2019 16:21:32 +0800 Subject: [PATCH 091/196] Preparing to merge into milvus --- shards/Dockerfile | 10 + shards/build.sh | 39 ++ shards/conftest.py | 27 + shards/manager.py | 28 ++ shards/mishards/.env.example | 33 ++ shards/mishards/__init__.py | 36 ++ shards/mishards/connections.py | 154 ++++++ shards/mishards/db_base.py | 52 ++ shards/mishards/exception_codes.py | 10 + shards/mishards/exception_handlers.py | 82 +++ shards/mishards/exceptions.py | 38 ++ shards/mishards/factories.py | 54 ++ shards/mishards/grpc_utils/__init__.py | 37 ++ .../mishards/grpc_utils/grpc_args_parser.py | 102 ++++ .../mishards/grpc_utils/grpc_args_wrapper.py | 4 + shards/mishards/grpc_utils/test_grpc.py | 75 +++ shards/mishards/hash_ring.py | 150 ++++++ shards/mishards/main.py | 15 + shards/mishards/models.py | 76 +++ shards/mishards/routings.py | 96 ++++ shards/mishards/server.py | 122 +++++ shards/mishards/service_handler.py | 475 ++++++++++++++++++ shards/mishards/settings.py | 94 ++++ shards/mishards/test_connections.py | 101 ++++ shards/mishards/test_models.py | 39 ++ shards/mishards/test_server.py | 279 ++++++++++ shards/mishards/utilities.py | 20 + shards/requirements.txt | 36 ++ shards/sd/__init__.py | 28 ++ shards/sd/kubernetes_provider.py | 331 ++++++++++++ shards/sd/static_provider.py | 39 ++ shards/setup.cfg | 4 + shards/start_services.yml | 45 ++ shards/tracing/__init__.py | 43 ++ shards/tracing/factory.py | 40 ++ shards/utils/__init__.py | 11 + shards/utils/logger_helper.py | 152 ++++++ 37 files changed, 2977 insertions(+) create mode 100644 shards/Dockerfile create mode 100755 shards/build.sh create mode 100644 shards/conftest.py create mode 100644 shards/manager.py create mode 100644 shards/mishards/.env.example create mode 100644 shards/mishards/__init__.py create mode 100644 shards/mishards/connections.py create mode 100644 shards/mishards/db_base.py create mode 100644 shards/mishards/exception_codes.py create mode 100644 shards/mishards/exception_handlers.py create mode 100644 shards/mishards/exceptions.py create mode 100644 shards/mishards/factories.py create mode 100644 shards/mishards/grpc_utils/__init__.py create mode 100644 shards/mishards/grpc_utils/grpc_args_parser.py create mode 100644 
shards/mishards/grpc_utils/grpc_args_wrapper.py create mode 100644 shards/mishards/grpc_utils/test_grpc.py create mode 100644 shards/mishards/hash_ring.py create mode 100644 shards/mishards/main.py create mode 100644 shards/mishards/models.py create mode 100644 shards/mishards/routings.py create mode 100644 shards/mishards/server.py create mode 100644 shards/mishards/service_handler.py create mode 100644 shards/mishards/settings.py create mode 100644 shards/mishards/test_connections.py create mode 100644 shards/mishards/test_models.py create mode 100644 shards/mishards/test_server.py create mode 100644 shards/mishards/utilities.py create mode 100644 shards/requirements.txt create mode 100644 shards/sd/__init__.py create mode 100644 shards/sd/kubernetes_provider.py create mode 100644 shards/sd/static_provider.py create mode 100644 shards/setup.cfg create mode 100644 shards/start_services.yml create mode 100644 shards/tracing/__init__.py create mode 100644 shards/tracing/factory.py create mode 100644 shards/utils/__init__.py create mode 100644 shards/utils/logger_helper.py diff --git a/shards/Dockerfile b/shards/Dockerfile new file mode 100644 index 0000000000..594640619e --- /dev/null +++ b/shards/Dockerfile @@ -0,0 +1,10 @@ +FROM python:3.6 +RUN apt update && apt install -y \ + less \ + telnet +RUN mkdir /source +WORKDIR /source +ADD ./requirements.txt ./ +RUN pip install -r requirements.txt +COPY . . +CMD python mishards/main.py diff --git a/shards/build.sh b/shards/build.sh new file mode 100755 index 0000000000..fad30518f2 --- /dev/null +++ b/shards/build.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +BOLD=`tput bold` +NORMAL=`tput sgr0` +YELLOW='\033[1;33m' +ENDC='\033[0m' + +echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" + +function build_image() { + dockerfile=$1 + remote_registry=$2 + tagged=$2 + buildcmd="docker build -t ${tagged} -f ${dockerfile} ." + echo -e "${BOLD}$buildcmd${NORMAL}" + $buildcmd + pushcmd="docker push ${remote_registry}" + echo -e "${BOLD}$pushcmd${NORMAL}" + $pushcmd + echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" +} + +case "$1" in + +all) + [[ -z $MISHARDS_REGISTRY ]] && { + echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" + exit 1 + } + + version="" + [[ ! -z $2 ]] && version=":${2}" + build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" + ;; +*) + echo "Usage: [option...] 
{base | apps}" + echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" + ;; +esac diff --git a/shards/conftest.py b/shards/conftest.py new file mode 100644 index 0000000000..34e22af693 --- /dev/null +++ b/shards/conftest.py @@ -0,0 +1,27 @@ +import logging +import pytest +import grpc +from mishards import settings, db, create_app + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def app(request): + app = create_app(settings.TestingConfig) + db.drop_all() + db.create_all() + + yield app + + db.drop_all() + + +@pytest.fixture +def started_app(app): + app.on_pre_run() + app.start(settings.SERVER_TEST_PORT) + + yield app + + app.stop() diff --git a/shards/manager.py b/shards/manager.py new file mode 100644 index 0000000000..931c90ebc8 --- /dev/null +++ b/shards/manager.py @@ -0,0 +1,28 @@ +import fire +from mishards import db +from sqlalchemy import and_ + + +class DBHandler: + @classmethod + def create_all(cls): + db.create_all() + + @classmethod + def drop_all(cls): + db.drop_all() + + @classmethod + def fun(cls, tid): + from mishards.factories import TablesFactory, TableFilesFactory, Tables + f = db.Session.query(Tables).filter(and_( + Tables.table_id == tid, + Tables.state != Tables.TO_DELETE) + ).first() + print(f) + + # f1 = TableFilesFactory() + + +if __name__ == '__main__': + fire.Fire(DBHandler) diff --git a/shards/mishards/.env.example b/shards/mishards/.env.example new file mode 100644 index 0000000000..0a23c0cf56 --- /dev/null +++ b/shards/mishards/.env.example @@ -0,0 +1,33 @@ +DEBUG=True + +WOSERVER=tcp://127.0.0.1:19530 +SERVER_PORT=19532 +SERVER_TEST_PORT=19888 + +SD_PROVIDER=Static + +SD_NAMESPACE=xp +SD_IN_CLUSTER=False +SD_POLL_INTERVAL=5 +SD_ROSERVER_POD_PATT=.*-ro-servers-.* +SD_LABEL_SELECTOR=tier=ro-servers + +SD_STATIC_HOSTS=127.0.0.1 +SD_STATIC_PORT=19530 + +#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_ECHO=True + +#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 +SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False +SQL_TEST_ECHO=False + +# TRACING_TEST_TYPE=jaeger +TRACING_TYPE=jaeger +TRACING_SERVICE_NAME=fortest +TRACING_SAMPLER_TYPE=const +TRACING_SAMPLER_PARAM=1 +TRACING_LOG_PAYLOAD=True +#TRACING_SAMPLER_TYPE=probabilistic +#TRACING_SAMPLER_PARAM=0.5 diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py new file mode 100644 index 0000000000..7db3d8cb5e --- /dev/null +++ b/shards/mishards/__init__.py @@ -0,0 +1,36 @@ +import logging +from mishards import settings +logger = logging.getLogger() + +from mishards.db_base import DB +db = DB() + +from mishards.server import Server +grpc_server = Server() + + +def create_app(testing_config=None): + config = testing_config if testing_config else settings.DefaultConfig + db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO) + + from mishards.connections import ConnectionMgr + connect_mgr = ConnectionMgr() + + from sd import ProviderManager + + sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) + discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) + + from tracing.factory import TracerFactory + from mishards.grpc_utils import GrpcSpanDecorator + tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) + + from mishards.routings 
import RouterFactory + router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) + + grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) + + from mishards import exception_handlers + + return grpc_server diff --git a/shards/mishards/connections.py b/shards/mishards/connections.py new file mode 100644 index 0000000000..618690a099 --- /dev/null +++ b/shards/mishards/connections.py @@ -0,0 +1,154 @@ +import logging +import threading +from functools import wraps +from milvus import Milvus + +from mishards import (settings, exceptions) +from utils import singleton + +logger = logging.getLogger(__name__) + + +class Connection: + def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs): + self.name = name + self.uri = uri + self.max_retry = max_retry + self.retried = 0 + self.conn = Milvus() + self.error_handlers = [] if not error_handlers else error_handlers + self.on_retry_func = kwargs.get('on_retry_func', None) + # self._connect() + + def __str__(self): + return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri) + + def _connect(self, metadata=None): + try: + self.conn.connect(uri=self.uri) + except Exception as e: + if not self.error_handlers: + raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata) + for handler in self.error_handlers: + handler(e, metadata=metadata) + + @property + def can_retry(self): + return self.retried < self.max_retry + + @property + def connected(self): + return self.conn.connected() + + def on_retry(self): + if self.on_retry_func: + self.on_retry_func(self) + else: + self.retried > 1 and logger.warning('{} is retrying {}'.format(self, self.retried)) + + def on_connect(self, metadata=None): + while not self.connected and self.can_retry: + self.retried += 1 + self.on_retry() + self._connect(metadata=metadata) + + if not self.can_retry and not self.connected: + raise exceptions.ConnectionConnectError(message='Max retry {} reached!'.format(self.max_retry, + metadata=metadata)) + + self.retried = 0 + + def connect(self, func, exception_handler=None): + @wraps(func) + def inner(*args, **kwargs): + self.on_connect() + try: + return func(*args, **kwargs) + except Exception as e: + if exception_handler: + exception_handler(e) + else: + raise e + return inner + + +@singleton +class ConnectionMgr: + def __init__(self): + self.metas = {} + self.conns = {} + + @property + def conn_names(self): + return set(self.metas.keys()) - set(['WOSERVER']) + + def conn(self, name, metadata, throw=False): + c = self.conns.get(name, None) + if not c: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError(message='Connection {} not found'.format(name), + metadata=metadata) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + threaded = { + threading.get_ident(): this_conn + } + self.conns[name] = threaded + return this_conn + + tid = threading.get_ident() + rconn = c.get(tid, None) + if not rconn: + url = self.metas.get(name, None) + if not url: + if not throw: + return None + raise exceptions.ConnectionNotFoundError('Connection {} not found'.format(name), + metadata=metadata) + this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY) + c[tid] = this_conn + return this_conn + + return rconn + + def on_new_meta(self, name, url): + logger.info('Register Connection: name={};url={}'.format(name, url)) + self.metas[name] = url + + def on_duplicate_meta(self, name, url): + if 
self.metas[name] == url: + return self.on_same_meta(name, url) + + return self.on_diff_meta(name, url) + + def on_same_meta(self, name, url): + # logger.warning('Register same meta: {}:{}'.format(name, url)) + pass + + def on_diff_meta(self, name, url): + logger.warning('Received {} with diff url={}'.format(name, url)) + self.metas[name] = url + self.conns[name] = {} + + def on_unregister_meta(self, name, url): + logger.info('Unregister name={};url={}'.format(name, url)) + self.conns.pop(name, None) + + def on_nonexisted_meta(self, name): + logger.warning('Non-existed meta: {}'.format(name)) + + def register(self, name, url): + meta = self.metas.get(name) + if not meta: + return self.on_new_meta(name, url) + else: + return self.on_duplicate_meta(name, url) + + def unregister(self, name): + logger.info('Unregister Connection: name={}'.format(name)) + url = self.metas.pop(name, None) + if url is None: + return self.on_nonexisted_meta(name) + return self.on_unregister_meta(name, url) diff --git a/shards/mishards/db_base.py b/shards/mishards/db_base.py new file mode 100644 index 0000000000..5f2eee9ba1 --- /dev/null +++ b/shards/mishards/db_base.py @@ -0,0 +1,52 @@ +import logging +from sqlalchemy import create_engine +from sqlalchemy.engine.url import make_url +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker, scoped_session +from sqlalchemy.orm.session import Session as SessionBase + +logger = logging.getLogger(__name__) + + +class LocalSession(SessionBase): + def __init__(self, db, autocommit=False, autoflush=True, **options): + self.db = db + bind = options.pop('bind', None) or db.engine + SessionBase.__init__(self, autocommit=autocommit, autoflush=autoflush, bind=bind, **options) + + +class DB: + Model = declarative_base() + + def __init__(self, uri=None, echo=False): + self.echo = echo + uri and self.init_db(uri, echo) + self.session_factory = scoped_session(sessionmaker(class_=LocalSession, db=self)) + + def init_db(self, uri, echo=False): + url = make_url(uri) + if url.get_backend_name() == 'sqlite': + self.engine = create_engine(url) + else: + self.engine = create_engine(uri, pool_size=100, pool_recycle=5, pool_timeout=30, + pool_pre_ping=True, + echo=echo, + max_overflow=0) + self.uri = uri + self.url = url + + def __str__(self): + return ''.format(self.url.get_backend_name(), self.url.database) + + @property + def Session(self): + return self.session_factory() + + def remove_session(self): + self.session_factory.remove() + + def drop_all(self): + self.Model.metadata.drop_all(self.engine) + + def create_all(self): + self.Model.metadata.create_all(self.engine) diff --git a/shards/mishards/exception_codes.py b/shards/mishards/exception_codes.py new file mode 100644 index 0000000000..bdd4572dd5 --- /dev/null +++ b/shards/mishards/exception_codes.py @@ -0,0 +1,10 @@ +INVALID_CODE = -1 + +CONNECT_ERROR_CODE = 10001 +CONNECTTION_NOT_FOUND_CODE = 10002 +DB_ERROR_CODE = 10003 + +TABLE_NOT_FOUND_CODE = 20001 +INVALID_ARGUMENT_CODE = 20002 +INVALID_DATE_RANGE_CODE = 20003 +INVALID_TOPK_CODE = 20004 diff --git a/shards/mishards/exception_handlers.py b/shards/mishards/exception_handlers.py new file mode 100644 index 0000000000..c79a6db5a3 --- /dev/null +++ b/shards/mishards/exception_handlers.py @@ -0,0 +1,82 @@ +import logging +from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2 +from mishards import grpc_server as server, exceptions + +logger = logging.getLogger(__name__) + + +def resp_handler(err, error_code): + if not 
isinstance(err, exceptions.BaseException): + return status_pb2.Status(error_code=error_code, reason=str(err)) + + status = status_pb2.Status(error_code=error_code, reason=err.message) + + if err.metadata is None: + return status + + resp_class = err.metadata.get('resp_class', None) + if not resp_class: + return status + + if resp_class == milvus_pb2.BoolReply: + return resp_class(status=status, bool_reply=False) + + if resp_class == milvus_pb2.VectorIds: + return resp_class(status=status, vector_id_array=[]) + + if resp_class == milvus_pb2.TopKQueryResultList: + return resp_class(status=status, topk_query_result=[]) + + if resp_class == milvus_pb2.TableRowCount: + return resp_class(status=status, table_row_count=-1) + + if resp_class == milvus_pb2.TableName: + return resp_class(status=status, table_name=[]) + + if resp_class == milvus_pb2.StringReply: + return resp_class(status=status, string_reply='') + + if resp_class == milvus_pb2.TableSchema: + return milvus_pb2.TableSchema( + status=status + ) + + if resp_class == milvus_pb2.IndexParam: + return milvus_pb2.IndexParam( + table_name=milvus_pb2.TableName( + status=status + ) + ) + + status.error_code = status_pb2.UNEXPECTED_ERROR + return status + + +@server.errorhandler(exceptions.TableNotFoundError) +def TableNotFoundErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.TABLE_NOT_EXISTS) + + +@server.errorhandler(exceptions.InvalidTopKError) +def InvalidTopKErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_TOPK) + + +@server.errorhandler(exceptions.InvalidArgumentError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_ARGUMENT) + + +@server.errorhandler(exceptions.DBError) +def DBErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.UNEXPECTED_ERROR) + + +@server.errorhandler(exceptions.InvalidRangeError) +def InvalidArgumentErrorHandler(err): + logger.error(err) + return resp_handler(err, status_pb2.ILLEGAL_RANGE) diff --git a/shards/mishards/exceptions.py b/shards/mishards/exceptions.py new file mode 100644 index 0000000000..72839f88d2 --- /dev/null +++ b/shards/mishards/exceptions.py @@ -0,0 +1,38 @@ +import mishards.exception_codes as codes + + +class BaseException(Exception): + code = codes.INVALID_CODE + message = 'BaseException' + + def __init__(self, message='', metadata=None): + self.message = self.__class__.__name__ if not message else message + self.metadata = metadata + + +class ConnectionConnectError(BaseException): + code = codes.CONNECT_ERROR_CODE + + +class ConnectionNotFoundError(BaseException): + code = codes.CONNECTTION_NOT_FOUND_CODE + + +class DBError(BaseException): + code = codes.DB_ERROR_CODE + + +class TableNotFoundError(BaseException): + code = codes.TABLE_NOT_FOUND_CODE + + +class InvalidTopKError(BaseException): + code = codes.INVALID_TOPK_CODE + + +class InvalidArgumentError(BaseException): + code = codes.INVALID_ARGUMENT_CODE + + +class InvalidRangeError(BaseException): + code = codes.INVALID_DATE_RANGE_CODE diff --git a/shards/mishards/factories.py b/shards/mishards/factories.py new file mode 100644 index 0000000000..52c0253b39 --- /dev/null +++ b/shards/mishards/factories.py @@ -0,0 +1,54 @@ +import time +import datetime +import random +import factory +from factory.alchemy import SQLAlchemyModelFactory +from faker import Faker +from faker.providers import BaseProvider + +from milvus.client.types import MetricType +from mishards import db +from mishards.models import 
diff --git a/shards/mishards/factories.py b/shards/mishards/factories.py
new file mode 100644
index 0000000000..52c0253b39
--- /dev/null
+++ b/shards/mishards/factories.py
@@ -0,0 +1,54 @@
+import time
+import datetime
+import random
+import factory
+from factory.alchemy import SQLAlchemyModelFactory
+from faker import Faker
+from faker.providers import BaseProvider
+
+from milvus.client.types import MetricType
+from mishards import db
+from mishards.models import Tables, TableFiles
+
+
+class FakerProvider(BaseProvider):
+    def this_date(self):
+        t = datetime.datetime.today()
+        return (t.year - 1900) * 10000 + (t.month - 1) * 100 + t.day
+
+
+factory.Faker.add_provider(FakerProvider)
+
+
+class TablesFactory(SQLAlchemyModelFactory):
+    class Meta:
+        model = Tables
+        sqlalchemy_session = db.session_factory
+        sqlalchemy_session_persistence = 'commit'
+
+    id = factory.Faker('random_number', digits=16, fix_len=True)
+    table_id = factory.Faker('uuid4')
+    state = factory.Faker('random_element', elements=(0, 1))
+    dimension = factory.Faker('random_element', elements=(256, 512))
+    created_on = int(time.time())
+    index_file_size = 0
+    engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3))
+    metric_type = factory.Faker('random_element', elements=(MetricType.L2, MetricType.IP))
+    nlist = 16384
+
+
+class TableFilesFactory(SQLAlchemyModelFactory):
+    class Meta:
+        model = TableFiles
+        sqlalchemy_session = db.session_factory
+        sqlalchemy_session_persistence = 'commit'
+
+    id = factory.Faker('random_number', digits=16, fix_len=True)
+    table = factory.SubFactory(TablesFactory)
+    engine_type = factory.Faker('random_element', elements=(0, 1, 2, 3))
+    file_id = factory.Faker('uuid4')
+    file_type = factory.Faker('random_element', elements=(0, 1, 2, 3, 4))
+    file_size = factory.Faker('random_number')
+    updated_time = int(time.time())
+    created_on = int(time.time())
+    date = factory.Faker('this_date')
diff --git a/shards/mishards/grpc_utils/__init__.py b/shards/mishards/grpc_utils/__init__.py
new file mode 100644
index 0000000000..f5225b2a66
--- /dev/null
+++ b/shards/mishards/grpc_utils/__init__.py
@@ -0,0 +1,37 @@
+from grpc_opentracing import SpanDecorator
+from milvus.grpc_gen import status_pb2
+
+
+class GrpcSpanDecorator(SpanDecorator):
+    def __call__(self, span, rpc_info):
+        status = None
+        if not rpc_info.response:
+            return
+        if isinstance(rpc_info.response, status_pb2.Status):
+            status = rpc_info.response
+        else:
+            try:
+                status = rpc_info.response.status
+            except Exception as e:
+                status = status_pb2.Status(error_code=status_pb2.UNEXPECTED_ERROR,
+                                           reason='Should not happen')
+
+        if status.error_code == 0:
+            return
+        error_log = {'event': 'error',
+                     'request': rpc_info.request,
+                     'response': rpc_info.response
+                     }
+        span.set_tag('error', True)
+        span.log_kv(error_log)
+
+
+def mark_grpc_method(func):
+    setattr(func, 'grpc_method', True)
+    return func
+
+
+def is_grpc_method(func):
+    if not func:
+        return False
+    return getattr(func, 'grpc_method', False)
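A quick sketch of the marker helpers above (the handler class is made up):

    from mishards.grpc_utils import mark_grpc_method, is_grpc_method

    class DemoHandler:
        @mark_grpc_method
        def Ping(self, request, context):
            return 'pong'

    # the server only wraps attributes that carry the marker
    assert is_grpc_method(DemoHandler.__dict__['Ping'])
    assert not is_grpc_method(None)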
diff --git a/shards/mishards/grpc_utils/grpc_args_parser.py b/shards/mishards/grpc_utils/grpc_args_parser.py
new file mode 100644
index 0000000000..039299803d
--- /dev/null
+++ b/shards/mishards/grpc_utils/grpc_args_parser.py
@@ -0,0 +1,102 @@
+from milvus import Status
+from functools import wraps
+
+
+def error_status(func):
+    @wraps(func)
+    def inner(*args, **kwargs):
+        try:
+            results = func(*args, **kwargs)
+        except Exception as e:
+            return Status(code=Status.UNEXPECTED_ERROR, message=str(e)), None
+
+        return Status(code=0, message="Success"), results
+
+    return inner
+
+
+class GrpcArgsParser(object):
+
+    @classmethod
+    @error_status
+    def parse_proto_TableSchema(cls, param):
+        _table_schema = {
+            'status': param.status,
+            'table_name': param.table_name,
+            'dimension': param.dimension,
+            'index_file_size': param.index_file_size,
+            'metric_type': param.metric_type
+        }
+
+        return _table_schema
+
+    @classmethod
+    @error_status
+    def parse_proto_TableName(cls, param):
+        return param.table_name
+
+    @classmethod
+    @error_status
+    def parse_proto_Index(cls, param):
+        _index = {
+            'index_type': param.index_type,
+            'nlist': param.nlist
+        }
+
+        return _index
+
+    @classmethod
+    @error_status
+    def parse_proto_IndexParam(cls, param):
+        _table_name = param.table_name
+        _status, _index = cls.parse_proto_Index(param.index)
+
+        if not _status.OK():
+            raise Exception("Argument parse error")
+
+        return _table_name, _index
+
+    @classmethod
+    @error_status
+    def parse_proto_Command(cls, param):
+        _cmd = param.cmd
+
+        return _cmd
+
+    @classmethod
+    @error_status
+    def parse_proto_Range(cls, param):
+        _start_value = param.start_value
+        _end_value = param.end_value
+
+        return _start_value, _end_value
+
+    @classmethod
+    @error_status
+    def parse_proto_RowRecord(cls, param):
+        return list(param.vector_data)
+
+    @classmethod
+    @error_status
+    def parse_proto_SearchParam(cls, param):
+        _table_name = param.table_name
+        _topk = param.topk
+        _nprobe = param.nprobe
+        _status, _range = cls.parse_proto_Range(param.query_range_array)
+
+        if not _status.OK():
+            raise Exception("Argument parse error")
+
+        _row_record = param.query_record_array
+
+        return _table_name, _row_record, _range, _topk
+
+    @classmethod
+    @error_status
+    def parse_proto_DeleteByRangeParam(cls, param):
+        _table_name = param.table_name
+        _range = param.range
+        _start_value = _range.start_value
+        _end_value = _range.end_value
+
+        return _table_name, _start_value, _end_value
diff --git a/shards/mishards/grpc_utils/grpc_args_wrapper.py b/shards/mishards/grpc_utils/grpc_args_wrapper.py
new file mode 100644
index 0000000000..7447dbd995
--- /dev/null
+++ b/shards/mishards/grpc_utils/grpc_args_wrapper.py
@@ -0,0 +1,4 @@
+# class GrpcArgsWrapper(object):
+
+#     @classmethod
+#     def proto_TableName(cls):
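Every parser above returns a (Status, result) pair thanks to error_status; a minimal illustration with a stand-in request object (not a real protobuf):

    from types import SimpleNamespace
    from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser

    status, name = Parser.parse_proto_TableName(SimpleNamespace(table_name='demo'))
    assert status.OK() and name == 'demo'

    status, result = Parser.parse_proto_TableName(object())   # missing attribute
    assert not status.OK() and result is None                 # the error is folded into Status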
diff --git a/shards/mishards/grpc_utils/test_grpc.py b/shards/mishards/grpc_utils/test_grpc.py
new file mode 100644
index 0000000000..9af09e5d0d
--- /dev/null
+++ b/shards/mishards/grpc_utils/test_grpc.py
@@ -0,0 +1,75 @@
+import logging
+import opentracing
+from mishards.grpc_utils import GrpcSpanDecorator, is_grpc_method
+from milvus.grpc_gen import status_pb2, milvus_pb2
+
+logger = logging.getLogger(__name__)
+
+
+class FakeTracer(opentracing.Tracer):
+    pass
+
+
+class FakeSpan(opentracing.Span):
+    def __init__(self, context, tracer, **kwargs):
+        super(FakeSpan, self).__init__(tracer, context)
+        self.reset()
+
+    def set_tag(self, key, value):
+        self.tags.append({key: value})
+
+    def log_kv(self, key_values, timestamp=None):
+        self.logs.append(key_values)
+
+    def reset(self):
+        self.tags = []
+        self.logs = []
+
+
+class FakeRpcInfo:
+    def __init__(self, request, response):
+        self.request = request
+        self.response = response
+
+
+class TestGrpcUtils:
+    def test_span_deco(self):
+        request = 'request'
+        OK = status_pb2.Status(error_code=status_pb2.SUCCESS, reason='Success')
+        response = OK
+        rpc_info = FakeRpcInfo(request=request, response=response)
+        span = FakeSpan(context=None, tracer=FakeTracer())
+        span_deco = GrpcSpanDecorator()
+        span_deco(span, rpc_info)
+        assert len(span.logs) == 0
+        assert len(span.tags) == 0
+
+        response = milvus_pb2.BoolReply(status=OK, bool_reply=False)
+        rpc_info = FakeRpcInfo(request=request, response=response)
+        span = FakeSpan(context=None, tracer=FakeTracer())
+        span_deco = GrpcSpanDecorator()
+        span_deco(span, rpc_info)
+        assert len(span.logs) == 0
+        assert len(span.tags) == 0
+
+        response = 1
+        rpc_info = FakeRpcInfo(request=request, response=response)
+        span = FakeSpan(context=None, tracer=FakeTracer())
+        span_deco = GrpcSpanDecorator()
+        span_deco(span, rpc_info)
+        assert len(span.logs) == 1
+        assert len(span.tags) == 1
+
+        response = 0
+        rpc_info = FakeRpcInfo(request=request, response=response)
+        span = FakeSpan(context=None, tracer=FakeTracer())
+        span_deco = GrpcSpanDecorator()
+        span_deco(span, rpc_info)
+        assert len(span.logs) == 0
+        assert len(span.tags) == 0
+
+    def test_is_grpc_method(self):
+        target = 1
+        assert not is_grpc_method(target)
+        target = None
+        assert not is_grpc_method(target)
+ """ + b_key = self._hash_digest(key) + return self._hash_val(b_key, lambda x: x) + + def _hash_val(self, b_key, entry_fn): + return (b_key[entry_fn(3)] << 24) | (b_key[entry_fn(2)] << 16) | ( + b_key[entry_fn(1)] << 8) | b_key[entry_fn(0)] + + def _hash_digest(self, key): + m = md5_constructor() + key = key.encode() + m.update(key) + return m.digest() + + +if __name__ == '__main__': + from collections import defaultdict + servers = [ + '192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.248:11212', + '192.168.0.249:11212' + ] + + ring = HashRing(servers) + keys = ['{}'.format(i) for i in range(100)] + mapped = defaultdict(list) + for k in keys: + server = ring.get_node(k) + mapped[server].append(k) + + for k, v in mapped.items(): + print(k, v) diff --git a/shards/mishards/main.py b/shards/mishards/main.py new file mode 100644 index 0000000000..c0d142607b --- /dev/null +++ b/shards/mishards/main.py @@ -0,0 +1,15 @@ +import os +import sys +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from mishards import (settings, create_app) + + +def main(): + server = create_app(settings.DefaultConfig) + server.run(port=settings.SERVER_PORT) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/shards/mishards/models.py b/shards/mishards/models.py new file mode 100644 index 0000000000..4b6c8f9ef4 --- /dev/null +++ b/shards/mishards/models.py @@ -0,0 +1,76 @@ +import logging +from sqlalchemy import (Integer, Boolean, Text, + String, BigInteger, and_, or_, + Column) +from sqlalchemy.orm import relationship, backref + +from mishards import db + +logger = logging.getLogger(__name__) + + +class TableFiles(db.Model): + FILE_TYPE_NEW = 0 + FILE_TYPE_RAW = 1 + FILE_TYPE_TO_INDEX = 2 + FILE_TYPE_INDEX = 3 + FILE_TYPE_TO_DELETE = 4 + FILE_TYPE_NEW_MERGE = 5 + FILE_TYPE_NEW_INDEX = 6 + FILE_TYPE_BACKUP = 7 + + __tablename__ = 'TableFiles' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50)) + engine_type = Column(Integer) + file_id = Column(String(50)) + file_type = Column(Integer) + file_size = Column(Integer, default=0) + row_count = Column(Integer, default=0) + updated_time = Column(BigInteger) + created_on = Column(BigInteger) + date = Column(Integer) + + table = relationship( + 'Tables', + primaryjoin='and_(foreign(TableFiles.table_id) == Tables.table_id)', + backref=backref('files', uselist=True, lazy='dynamic') + ) + + +class Tables(db.Model): + TO_DELETE = 1 + NORMAL = 0 + + __tablename__ = 'Tables' + + id = Column(BigInteger, primary_key=True, autoincrement=True) + table_id = Column(String(50), unique=True) + state = Column(Integer) + dimension = Column(Integer) + created_on = Column(Integer) + flag = Column(Integer, default=0) + index_file_size = Column(Integer) + engine_type = Column(Integer) + nlist = Column(Integer) + metric_type = Column(Integer) + + def files_to_search(self, date_range=None): + cond = or_( + TableFiles.file_type == TableFiles.FILE_TYPE_RAW, + TableFiles.file_type == TableFiles.FILE_TYPE_TO_INDEX, + TableFiles.file_type == TableFiles.FILE_TYPE_INDEX, + ) + if date_range: + cond = and_( + cond, + or_( + and_(TableFiles.date >= d[0], TableFiles.date < d[1]) for d in date_range + ) + ) + + files = self.files.filter(cond) + + logger.debug('DATE_RANGE: {}'.format(date_range)) + return files diff --git a/shards/mishards/routings.py b/shards/mishards/routings.py new file mode 100644 index 0000000000..823972726f --- /dev/null +++ b/shards/mishards/routings.py @@ -0,0 +1,96 @@ +import 
diff --git a/shards/mishards/routings.py b/shards/mishards/routings.py
new file mode 100644
index 0000000000..823972726f
--- /dev/null
+++ b/shards/mishards/routings.py
@@ -0,0 +1,96 @@
+import logging
+from sqlalchemy import exc as sqlalchemy_exc
+from sqlalchemy import and_
+
+from mishards import exceptions, db
+from mishards.hash_ring import HashRing
+from mishards.models import Tables
+
+logger = logging.getLogger(__name__)
+
+
+class RouteManager:
+    ROUTER_CLASSES = {}
+
+    @classmethod
+    def register_router_class(cls, target):
+        name = target.__dict__.get('NAME', None)
+        name = name if name else target.__name__
+        cls.ROUTER_CLASSES[name] = target
+        return target
+
+    @classmethod
+    def get_router_class(cls, name):
+        return cls.ROUTER_CLASSES.get(name, None)
+
+
+class RouterFactory:
+    @classmethod
+    def new_router(cls, name, conn_mgr, **kwargs):
+        router_class = RouteManager.get_router_class(name)
+        assert router_class
+        return router_class(conn_mgr, **kwargs)
+
+
+class RouterMixin:
+    def __init__(self, conn_mgr):
+        self.conn_mgr = conn_mgr
+
+    def routing(self, table_name, metadata=None, **kwargs):
+        raise NotImplementedError()
+
+    def connection(self, metadata=None):
+        conn = self.conn_mgr.conn('WOSERVER', metadata=metadata)
+        if conn:
+            conn.on_connect(metadata=metadata)
+        return conn.conn
+
+    def query_conn(self, name, metadata=None):
+        conn = self.conn_mgr.conn(name, metadata=metadata)
+        if not conn:
+            raise exceptions.ConnectionNotFoundError(name, metadata=metadata)
+        conn.on_connect(metadata=metadata)
+        return conn.conn
+
+
+@RouteManager.register_router_class
+class FileBasedHashRingRouter(RouterMixin):
+    NAME = 'FileBasedHashRingRouter'
+
+    def __init__(self, conn_mgr, **kwargs):
+        super(FileBasedHashRingRouter, self).__init__(conn_mgr)
+
+    def routing(self, table_name, metadata=None, **kwargs):
+        range_array = kwargs.pop('range_array', None)
+        return self._route(table_name, range_array, metadata, **kwargs)
+
+    def _route(self, table_name, range_array, metadata=None, **kwargs):
+        # PXU TODO: Implement Thread-local Context
+        # PXU TODO: Session life mgt
+        try:
+            table = db.Session.query(Tables).filter(
+                and_(Tables.table_id == table_name,
+                     Tables.state != Tables.TO_DELETE)).first()
+        except sqlalchemy_exc.SQLAlchemyError as e:
+            raise exceptions.DBError(message=str(e), metadata=metadata)
+
+        if not table:
+            raise exceptions.TableNotFoundError(table_name, metadata=metadata)
+        files = table.files_to_search(range_array)
+        db.remove_session()
+
+        servers = self.conn_mgr.conn_names
+        logger.info('Available servers: {}'.format(servers))
+
+        ring = HashRing(servers)
+
+        routing = {}
+
+        for f in files:
+            target_host = ring.get_node(str(f.id))
+            sub = routing.get(target_host, None)
+            if not sub:
+                routing[target_host] = {'table_id': table_name, 'file_ids': []}
+            routing[target_host]['file_ids'].append(str(f.id))
+
+        return routing
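Alternative routing policies plug in through the decorator above; a hypothetical broadcast router, for illustration only:

    from mishards.routings import RouteManager, RouterMixin

    @RouteManager.register_router_class
    class BroadcastRouter(RouterMixin):
        NAME = 'BroadcastRouter'    # selectable via ROUTER_CLASS_NAME

        def routing(self, table_name, metadata=None, **kwargs):
            # route the whole table to every known server
            return {server: {'table_id': table_name, 'file_ids': []}
                    for server in self.conn_mgr.conn_names}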
diff --git a/shards/mishards/server.py b/shards/mishards/server.py
new file mode 100644
index 0000000000..599a00e455
--- /dev/null
+++ b/shards/mishards/server.py
@@ -0,0 +1,122 @@
+import logging
+import grpc
+import time
+import socket
+import inspect
+from urllib.parse import urlparse
+from functools import wraps
+from concurrent import futures
+from grpc._cython import cygrpc
+from milvus.grpc_gen.milvus_pb2_grpc import add_MilvusServiceServicer_to_server
+from mishards.grpc_utils import is_grpc_method
+from mishards.service_handler import ServiceHandler
+from mishards import settings
+
+logger = logging.getLogger(__name__)
+
+
+class Server:
+    def __init__(self):
+        self.pre_run_handlers = set()
+        self.grpc_methods = set()
+        self.error_handlers = {}
+        self.exit_flag = False
+
+    def init_app(self,
+                 conn_mgr,
+                 tracer,
+                 router,
+                 discover,
+                 port=19530,
+                 max_workers=10,
+                 **kwargs):
+        self.port = int(port)
+        self.conn_mgr = conn_mgr
+        self.tracer = tracer
+        self.router = router
+        self.discover = discover
+
+        self.server_impl = grpc.server(
+            thread_pool=futures.ThreadPoolExecutor(max_workers=max_workers),
+            options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
+                     (cygrpc.ChannelArgKey.max_receive_message_length, -1)])
+
+        self.server_impl = self.tracer.decorate(self.server_impl)
+
+        self.register_pre_run_handler(self.pre_run_handler)
+
+    def pre_run_handler(self):
+        woserver = settings.WOSERVER
+        url = urlparse(woserver)
+        ip = socket.gethostbyname(url.hostname)
+        socket.inet_pton(socket.AF_INET, ip)
+        self.conn_mgr.register(
+            'WOSERVER', '{}://{}:{}'.format(url.scheme, ip, url.port or 80))
+
+    def register_pre_run_handler(self, func):
+        logger.info('Registering {} into server pre_run_handlers'.format(func))
+        self.pre_run_handlers.add(func)
+        return func
+
+    def wrap_method_with_errorhandler(self, func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except Exception as e:
+                if e.__class__ in self.error_handlers:
+                    return self.error_handlers[e.__class__](e)
+                raise
+
+        return wrapper
+
+    def errorhandler(self, exception):
+        if inspect.isclass(exception) and issubclass(exception, Exception):
+
+            def wrapper(func):
+                self.error_handlers[exception] = func
+                return func
+
+            return wrapper
+        return exception
+
+    def on_pre_run(self):
+        for handler in self.pre_run_handlers:
+            handler()
+        self.discover.start()
+
+    def start(self, port=None):
+        handler_class = self.decorate_handler(ServiceHandler)
+        add_MilvusServiceServicer_to_server(
+            handler_class(tracer=self.tracer,
+                          router=self.router), self.server_impl)
+        self.server_impl.add_insecure_port("[::]:{}".format(
+            str(port or self.port)))
+        self.server_impl.start()
+
+    def run(self, port):
+        logger.info('Milvus server start ......')
+        port = port or self.port
+        self.on_pre_run()
+
+        self.start(port)
+        logger.info('Listening on port {}'.format(port))
+
+        try:
+            while not self.exit_flag:
+                time.sleep(5)
+        except KeyboardInterrupt:
+            self.stop()
+
+    def stop(self):
+        logger.info('Server is shutting down ......')
+        self.exit_flag = True
+        self.server_impl.stop(0)
+        self.tracer.close()
+        logger.info('Server is closed')
+
+    def decorate_handler(self, handler):
+        for key, attr in handler.__dict__.items():
+            if is_grpc_method(attr):
+                setattr(handler, key, self.wrap_method_with_errorhandler(attr))
+        return handler
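The errorhandler/wrap_method_with_errorhandler pair mirrors Flask-style error registration; a sketch (the handler body is hypothetical):

    server = Server()

    @server.errorhandler(ValueError)
    def handle_value_error(err):
        # whatever reply the wrapped gRPC method should produce on this error
        return None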
diff --git a/shards/mishards/service_handler.py b/shards/mishards/service_handler.py
new file mode 100644
index 0000000000..5e91c14f14
--- /dev/null
+++ b/shards/mishards/service_handler.py
@@ -0,0 +1,475 @@
+import logging
+import time
+import datetime
+from collections import defaultdict
+
+import multiprocessing
+from concurrent.futures import ThreadPoolExecutor
+from milvus.grpc_gen import milvus_pb2, milvus_pb2_grpc, status_pb2
+from milvus.grpc_gen.milvus_pb2 import TopKQueryResult
+from milvus.client.abstract import Range
+from milvus.client import types as Types
+
+from mishards import (db, settings, exceptions)
+from mishards.grpc_utils import mark_grpc_method
+from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
+from mishards import utilities
+
+logger = logging.getLogger(__name__)
+
+
+class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
+    MAX_NPROBE = 2048
+    MAX_TOPK = 2048
+
+    def __init__(self, tracer, router, max_workers=multiprocessing.cpu_count(), **kwargs):
+        self.table_meta = {}
+        self.error_handlers = {}
+        self.tracer = tracer
+        self.router = router
+        self.max_workers = max_workers
+
+    def _do_merge(self, files_n_topk_results, topk, reverse=False, **kwargs):
+        status = status_pb2.Status(error_code=status_pb2.SUCCESS,
+                                   reason="Success")
+        if not files_n_topk_results:
+            return status, []
+
+        request_results = defaultdict(list)
+
+        calc_time = time.time()
+        for files_collection in files_n_topk_results:
+            if isinstance(files_collection, tuple):
+                status, _ = files_collection
+                return status, []
+            for request_pos, each_request_results in enumerate(
+                    files_collection.topk_query_result):
+                request_results[request_pos].extend(
+                    each_request_results.query_result_arrays)
+                request_results[request_pos] = sorted(
+                    request_results[request_pos],
+                    key=lambda x: x.distance,
+                    reverse=reverse)[:topk]
+
+        calc_time = time.time() - calc_time
+        logger.info('Merge takes {}'.format(calc_time))
+
+        results = sorted(request_results.items())
+        topk_query_result = []
+
+        for result in results:
+            query_result = TopKQueryResult(query_result_arrays=result[1])
+            topk_query_result.append(query_result)
+
+        return status, topk_query_result
+
+    def _do_query(self,
+                  context,
+                  table_id,
+                  table_meta,
+                  vectors,
+                  topk,
+                  nprobe,
+                  range_array=None,
+                  **kwargs):
+        metadata = kwargs.get('metadata', None)
+        range_array = [
+            utilities.range_to_date(r, metadata=metadata) for r in range_array
+        ] if range_array else None
+
+        routing = {}
+        p_span = None if self.tracer.empty else context.get_active_span(
+        ).context
+        with self.tracer.start_span('get_routing', child_of=p_span):
+            routing = self.router.routing(table_id,
+                                          range_array=range_array,
+                                          metadata=metadata)
+        logger.info('Routing: {}'.format(routing))
+
+        metadata = kwargs.get('metadata', None)
+
+        rs = []
+        all_topk_results = []
+
+        def search(addr, query_params, vectors, topk, nprobe, **kwargs):
+            logger.info(
+                'Send Search Request: addr={};params={};nq={};topk={};nprobe={}'
+                .format(addr, query_params, len(vectors), topk, nprobe))
+
+            conn = self.router.query_conn(addr, metadata=metadata)
+            start = time.time()
+            span = kwargs.get('span', None)
+            span = span if span else (None if self.tracer.empty else
+                                      context.get_active_span().context)
+
+            with self.tracer.start_span('search_{}'.format(addr),
+                                        child_of=span):
+                ret = conn.search_vectors_in_files(
+                    table_name=query_params['table_id'],
+                    file_ids=query_params['file_ids'],
+                    query_records=vectors,
+                    top_k=topk,
+                    nprobe=nprobe,
+                    lazy_=True)
+                end = time.time()
+                logger.info('search_vectors_in_files takes: {}'.format(end - start))
+
+                all_topk_results.append(ret)
+
+        with self.tracer.start_span('do_search', child_of=p_span) as span:
+            with ThreadPoolExecutor(max_workers=self.max_workers) as pool:
+                for addr, params in routing.items():
+                    res = pool.submit(search,
+                                      addr,
+                                      params,
+                                      vectors,
+                                      topk,
+                                      nprobe,
+                                      span=span)
+                    rs.append(res)
+
+                for res in rs:
+                    res.result()
+
+        reverse = table_meta.metric_type == Types.MetricType.IP
+        with self.tracer.start_span('do_merge', child_of=p_span):
+            return self._do_merge(all_topk_results,
+                                  topk,
+                                  reverse=reverse,
+                                  metadata=metadata)
+    def _create_table(self, table_schema):
+        return self.router.connection().create_table(table_schema)
+
+    @mark_grpc_method
+    def CreateTable(self, request, context):
+        _status, _table_schema = Parser.parse_proto_TableSchema(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message)
+
+        logger.info('CreateTable {}'.format(_table_schema['table_name']))
+
+        _status = self._create_table(_table_schema)
+
+        return status_pb2.Status(error_code=_status.code,
+                                 reason=_status.message)
+
+    def _has_table(self, table_name, metadata=None):
+        return self.router.connection(metadata=metadata).has_table(table_name)
+
+    @mark_grpc_method
+    def HasTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return milvus_pb2.BoolReply(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message),
+                bool_reply=False)
+
+        logger.info('HasTable {}'.format(_table_name))
+
+        _status, _bool = self._has_table(_table_name,
+                                         metadata={'resp_class': milvus_pb2.BoolReply})
+
+        return milvus_pb2.BoolReply(status=status_pb2.Status(
+            error_code=_status.code, reason=_status.message),
+            bool_reply=_bool)
+
+    def _delete_table(self, table_name):
+        return self.router.connection().delete_table(table_name)
+
+    @mark_grpc_method
+    def DropTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message)
+
+        logger.info('DropTable {}'.format(_table_name))
+
+        _status = self._delete_table(_table_name)
+
+        return status_pb2.Status(error_code=_status.code,
+                                 reason=_status.message)
+
+    def _create_index(self, table_name, index):
+        return self.router.connection().create_index(table_name, index)
+
+    @mark_grpc_method
+    def CreateIndex(self, request, context):
+        _status, unpacks = Parser.parse_proto_IndexParam(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message)
+
+        _table_name, _index = unpacks
+
+        logger.info('CreateIndex {}'.format(_table_name))
+
+        # TODO: interface create_table incomplete
+        _status = self._create_index(_table_name, _index)
+
+        return status_pb2.Status(error_code=_status.code,
+                                 reason=_status.message)
+
+    def _add_vectors(self, param, metadata=None):
+        return self.router.connection(metadata=metadata).add_vectors(
+            None, None, insert_param=param)
+
+    @mark_grpc_method
+    def Insert(self, request, context):
+        logger.info('Insert')
+        # TODO: The SDK interface add_vectors() could be updated to accept a key 'row_id_array'
+        _status, _ids = self._add_vectors(
+            metadata={'resp_class': milvus_pb2.VectorIds}, param=request)
+        return milvus_pb2.VectorIds(status=status_pb2.Status(
+            error_code=_status.code, reason=_status.message),
+            vector_id_array=_ids)
+    @mark_grpc_method
+    def Search(self, request, context):
+
+        table_name = request.table_name
+
+        topk = request.topk
+        nprobe = request.nprobe
+
+        logger.info('Search {}: topk={} nprobe={}'.format(
+            table_name, topk, nprobe))
+
+        metadata = {'resp_class': milvus_pb2.TopKQueryResultList}
+
+        if nprobe > self.MAX_NPROBE or nprobe <= 0:
+            raise exceptions.InvalidArgumentError(
+                message='Invalid nprobe: {}'.format(nprobe), metadata=metadata)
+
+        if topk > self.MAX_TOPK or topk <= 0:
+            raise exceptions.InvalidTopKError(
+                message='Invalid topk: {}'.format(topk), metadata=metadata)
+
+        table_meta = self.table_meta.get(table_name, None)
+
+        if not table_meta:
+            status, info = self.router.connection(
+                metadata=metadata).describe_table(table_name)
+            if not status.OK():
+                raise exceptions.TableNotFoundError(table_name,
+                                                    metadata=metadata)
+
+            self.table_meta[table_name] = info
+            table_meta = info
+
+        start = time.time()
+
+        query_record_array = []
+
+        for query_record in request.query_record_array:
+            query_record_array.append(list(query_record.vector_data))
+
+        query_range_array = []
+        for query_range in request.query_range_array:
+            query_range_array.append(
+                Range(query_range.start_value, query_range.end_value))
+
+        status, results = self._do_query(context,
+                                         table_name,
+                                         table_meta,
+                                         query_record_array,
+                                         topk,
+                                         nprobe,
+                                         query_range_array,
+                                         metadata=metadata)
+
+        now = time.time()
+        logger.info('SearchVector takes: {}'.format(now - start))
+
+        topk_result_list = milvus_pb2.TopKQueryResultList(
+            status=status_pb2.Status(error_code=status.error_code,
+                                     reason=status.reason),
+            topk_query_result=results)
+        return topk_result_list
+
+    @mark_grpc_method
+    def SearchInFiles(self, request, context):
+        raise NotImplementedError()
+
+    def _describe_table(self, table_name, metadata=None):
+        return self.router.connection(metadata=metadata).describe_table(table_name)
+
+    @mark_grpc_method
+    def DescribeTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return milvus_pb2.TableSchema(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message), )
+
+        metadata = {'resp_class': milvus_pb2.TableSchema}
+
+        logger.info('DescribeTable {}'.format(_table_name))
+        _status, _table = self._describe_table(metadata=metadata,
+                                               table_name=_table_name)
+
+        if _status.OK():
+            return milvus_pb2.TableSchema(
+                table_name=_table_name,
+                index_file_size=_table.index_file_size,
+                dimension=_table.dimension,
+                metric_type=_table.metric_type,
+                status=status_pb2.Status(error_code=_status.code,
+                                         reason=_status.message),
+            )
+
+        return milvus_pb2.TableSchema(
+            table_name=_table_name,
+            status=status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message),
+        )
+
+    def _count_table(self, table_name, metadata=None):
+        return self.router.connection(
+            metadata=metadata).get_table_row_count(table_name)
+
+    @mark_grpc_method
+    def CountTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            status = status_pb2.Status(error_code=_status.code,
+                                       reason=_status.message)
+
+            return milvus_pb2.TableRowCount(status=status)
+
+        logger.info('CountTable {}'.format(_table_name))
+
+        metadata = {'resp_class': milvus_pb2.TableRowCount}
+        _status, _count = self._count_table(_table_name, metadata=metadata)
+
+        return milvus_pb2.TableRowCount(
+            status=status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message),
+            table_row_count=_count if isinstance(_count, int) else -1)
+
+    def _get_server_version(self, metadata=None):
+        return self.router.connection(metadata=metadata).server_version()
+
+    @mark_grpc_method
+    def Cmd(self, request, context):
+        _status, _cmd = Parser.parse_proto_Command(request)
+        logger.info('Cmd: {}'.format(_cmd))
+
+        if not _status.OK():
+            return milvus_pb2.StringReply(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message))
+
+        metadata = {'resp_class': milvus_pb2.StringReply}
+
+        if _cmd == 'version':
+            _status, _reply = self._get_server_version(metadata=metadata)
+        else:
+            _status, _reply = self.router.connection(
+                metadata=metadata).server_status()
+
+        return milvus_pb2.StringReply(status=status_pb2.Status(
+            error_code=_status.code, reason=_status.message),
+            string_reply=_reply)
+    def _show_tables(self, metadata=None):
+        return self.router.connection(metadata=metadata).show_tables()
+
+    @mark_grpc_method
+    def ShowTables(self, request, context):
+        logger.info('ShowTables')
+        metadata = {'resp_class': milvus_pb2.TableName}
+        _status, _results = self._show_tables(metadata=metadata)
+
+        return milvus_pb2.TableNameList(status=status_pb2.Status(
+            error_code=_status.code, reason=_status.message),
+            table_names=_results)
+
+    def _delete_by_range(self, table_name, start_date, end_date):
+        return self.router.connection().delete_vectors_by_range(table_name,
+                                                                start_date,
+                                                                end_date)
+
+    @mark_grpc_method
+    def DeleteByRange(self, request, context):
+        _status, unpacks = \
+            Parser.parse_proto_DeleteByRangeParam(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message)
+
+        _table_name, _start_date, _end_date = unpacks
+
+        logger.info('DeleteByRange {}: {} {}'.format(_table_name, _start_date,
+                                                     _end_date))
+        _status = self._delete_by_range(_table_name, _start_date, _end_date)
+        return status_pb2.Status(error_code=_status.code,
+                                 reason=_status.message)
+
+    def _preload_table(self, table_name):
+        return self.router.connection().preload_table(table_name)
+
+    @mark_grpc_method
+    def PreloadTable(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message)
+
+        logger.info('PreloadTable {}'.format(_table_name))
+        _status = self._preload_table(_table_name)
+        return status_pb2.Status(error_code=_status.code,
+                                 reason=_status.message)
+
+    def _describe_index(self, table_name, metadata=None):
+        return self.router.connection(metadata=metadata).describe_index(table_name)
+
+    @mark_grpc_method
+    def DescribeIndex(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return milvus_pb2.IndexParam(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message))
+
+        metadata = {'resp_class': milvus_pb2.IndexParam}
+
+        logger.info('DescribeIndex {}'.format(_table_name))
+        _status, _index_param = self._describe_index(table_name=_table_name,
+                                                     metadata=metadata)
+
+        if not _index_param:
+            return milvus_pb2.IndexParam(status=status_pb2.Status(
+                error_code=_status.code, reason=_status.message))
+
+        _index = milvus_pb2.Index(index_type=_index_param._index_type,
+                                  nlist=_index_param._nlist)
+
+        return milvus_pb2.IndexParam(status=status_pb2.Status(
+            error_code=_status.code, reason=_status.message),
+            table_name=_table_name,
+            index=_index)
+
+    def _drop_index(self, table_name):
+        return self.router.connection().drop_index(table_name)
+
+    @mark_grpc_method
+    def DropIndex(self, request, context):
+        _status, _table_name = Parser.parse_proto_TableName(request)
+
+        if not _status.OK():
+            return status_pb2.Status(error_code=_status.code,
+                                     reason=_status.message)
+
+        logger.info('DropIndex {}'.format(_table_name))
+        _status = self._drop_index(_table_name)
+        return status_pb2.Status(error_code=_status.code,
+                                 reason=_status.message)
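_do_merge above keeps, for each request position, the best topk hits across all shard replies; in miniature, with plain (id, distance) tuples standing in for protobuf results:

    shard_replies = [[(1, 0.2), (2, 0.9)], [(3, 0.1), (4, 0.5)]]
    merged = sorted((hit for reply in shard_replies for hit in reply),
                    key=lambda hit: hit[1])[:2]
    assert merged == [(3, 0.1), (1, 0.2)]   # smallest distance first for L2; IP reverses the sort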
diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py
new file mode 100644
index 0000000000..21a3bb7a65
--- /dev/null
+++ b/shards/mishards/settings.py
@@ -0,0 +1,94 @@
+import sys
+import os
+
+from environs import Env
+env = Env()
+
+FROM_EXAMPLE = env.bool('FROM_EXAMPLE', False)
+if FROM_EXAMPLE:
+    from dotenv import load_dotenv
+    load_dotenv('./mishards/.env.example')
+else:
+    env.read_env()
+
+DEBUG = env.bool('DEBUG', False)
+
+LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO')
+LOG_PATH = env.str('LOG_PATH', '/tmp/mishards')
+LOG_NAME = env.str('LOG_NAME', 'logfile')
+TIMEZONE = env.str('TIMEZONE', 'UTC')
+
+from utils.logger_helper import config
+config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE)
+
+TIMEOUT = env.int('TIMEOUT', 60)
+MAX_RETRY = env.int('MAX_RETRY', 3)
+
+SERVER_PORT = env.int('SERVER_PORT', 19530)
+SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530)
+WOSERVER = env.str('WOSERVER')
+
+SD_PROVIDER_SETTINGS = None
+SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes')
+if SD_PROVIDER == 'Kubernetes':
+    from sd.kubernetes_provider import KubernetesProviderSettings
+    SD_PROVIDER_SETTINGS = KubernetesProviderSettings(
+        namespace=env.str('SD_NAMESPACE', ''),
+        in_cluster=env.bool('SD_IN_CLUSTER', False),
+        poll_interval=env.int('SD_POLL_INTERVAL', 5),
+        pod_patt=env.str('SD_ROSERVER_POD_PATT', ''),
+        label_selector=env.str('SD_LABEL_SELECTOR', ''),
+        port=env.int('SD_PORT', 19530))
+elif SD_PROVIDER == 'Static':
+    from sd.static_provider import StaticProviderSettings
+    SD_PROVIDER_SETTINGS = StaticProviderSettings(
+        hosts=env.list('SD_STATIC_HOSTS', []),
+        port=env.int('SD_STATIC_PORT', 19530))
+
+# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530')
+
+
+class TracingConfig:
+    TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards')
+    TRACING_VALIDATE = env.bool('TRACING_VALIDATE', True)
+    TRACING_LOG_PAYLOAD = env.bool('TRACING_LOG_PAYLOAD', False)
+    TRACING_CONFIG = {
+        'sampler': {
+            'type': env.str('TRACING_SAMPLER_TYPE', 'const'),
+            'param': env.str('TRACING_SAMPLER_PARAM', "1"),
+        },
+        'local_agent': {
+            'reporting_host': env.str('TRACING_REPORTING_HOST', '127.0.0.1'),
+            'reporting_port': env.str('TRACING_REPORTING_PORT', '5775')
+        },
+        'logging': env.bool('TRACING_LOGGING', True)
+    }
+    DEFAULT_TRACING_CONFIG = {
+        'sampler': {
+            'type': env.str('TRACING_SAMPLER_TYPE', 'const'),
+            'param': env.str('TRACING_SAMPLER_PARAM', "0"),
+        }
+    }
+
+
+class DefaultConfig:
+    SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI')
+    SQL_ECHO = env.bool('SQL_ECHO', False)
+    TRACING_TYPE = env.str('TRACING_TYPE', '')
+    ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter')
+
+
+class TestingConfig(DefaultConfig):
+    SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '')
+    SQL_ECHO = env.bool('SQL_TEST_ECHO', False)
+    TRACING_TYPE = env.str('TRACING_TEST_TYPE', '')
+    ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter')
+
+
+if __name__ == '__main__':
+    import logging
+    logger = logging.getLogger(__name__)
+    logger.debug('DEBUG')
+    logger.info('INFO')
+    logger.warning('WARN')
+    logger.error('ERROR')
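A minimal .env for running mishards locally against a standalone Milvus, given the settings above (all values illustrative):

    WOSERVER=tcp://127.0.0.1:19530
    SQLALCHEMY_DATABASE_URI=sqlite:////tmp/mishards_meta.db
    SD_PROVIDER=Static
    SD_STATIC_HOSTS=127.0.0.1
    DEBUG=true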
diff --git a/shards/mishards/test_connections.py b/shards/mishards/test_connections.py
new file mode 100644
index 0000000000..819d2e03da
--- /dev/null
+++ b/shards/mishards/test_connections.py
@@ -0,0 +1,101 @@
+import logging
+import pytest
+import mock
+
+from milvus import Milvus
+from mishards.connections import (ConnectionMgr, Connection)
+from mishards import exceptions
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.usefixtures('app')
+class TestConnection:
+    def test_manager(self):
+        mgr = ConnectionMgr()
+
+        mgr.register('pod1', '111')
+        mgr.register('pod2', '222')
+        mgr.register('pod2', '222')
+        mgr.register('pod2', '2222')
+        assert len(mgr.conn_names) == 2
+
+        mgr.unregister('pod1')
+        assert len(mgr.conn_names) == 1
+
+        mgr.unregister('pod2')
+        assert len(mgr.conn_names) == 0
+
+        mgr.register('WOSERVER', 'xxxx')
+        assert len(mgr.conn_names) == 0
+
+        assert not mgr.conn('XXXX', None)
+        with pytest.raises(exceptions.ConnectionNotFoundError):
+            mgr.conn('XXXX', None, True)
+
+        mgr.conn('WOSERVER', None)
+
+    def test_connection(self):
+        class Conn:
+            def __init__(self, state):
+                self.state = state
+
+            def connect(self, uri):
+                return self.state
+
+            def connected(self):
+                return self.state
+
+        FAIL_CONN = Conn(False)
+        PASS_CONN = Conn(True)
+
+        class Retry:
+            def __init__(self):
+                self.times = 0
+
+            def __call__(self, conn):
+                self.times += 1
+                logger.info('Retrying {}'.format(self.times))
+
+        class Func():
+            def __init__(self):
+                self.executed = False
+
+            def __call__(self):
+                self.executed = True
+
+        max_retry = 3
+
+        RetryObj = Retry()
+
+        c = Connection('client',
+                       uri='xx',
+                       max_retry=max_retry,
+                       on_retry_func=RetryObj)
+        c.conn = FAIL_CONN
+        ff = Func()
+        this_connect = c.connect(func=ff)
+        with pytest.raises(exceptions.ConnectionConnectError):
+            this_connect()
+        assert RetryObj.times == max_retry
+        assert not ff.executed
+        RetryObj = Retry()
+
+        c.conn = PASS_CONN
+        this_connect = c.connect(func=ff)
+        this_connect()
+        assert ff.executed
+        assert RetryObj.times == 0
+
+        this_connect = c.connect(func=None)
+        with pytest.raises(TypeError):
+            this_connect()
+
+        errors = []
+
+        def error_handler(err):
+            errors.append(err)
+
+        this_connect = c.connect(func=None, exception_handler=error_handler)
+        this_connect()
+        assert len(errors) == 1
diff --git a/shards/mishards/test_models.py b/shards/mishards/test_models.py
new file mode 100644
index 0000000000..d60b62713e
--- /dev/null
+++ b/shards/mishards/test_models.py
@@ -0,0 +1,39 @@
+import logging
+import pytest
+from mishards.factories import TableFiles, Tables, TableFilesFactory, TablesFactory
+from mishards import db, create_app, settings
+from mishards.factories import (
+    Tables, TableFiles,
+    TablesFactory, TableFilesFactory
+)
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.usefixtures('app')
+class TestModels:
+    def test_files_to_search(self):
+        table = TablesFactory()
+        new_files_cnt = 5
+        to_index_cnt = 10
+        raw_cnt = 20
+        backup_cnt = 12
+        to_delete_cnt = 9
+        index_cnt = 8
+        new_index_cnt = 6
+        new_merge_cnt = 11
+
+        new_files = TableFilesFactory.create_batch(new_files_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW, date=110)
+        to_index_files = TableFilesFactory.create_batch(to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX, date=110)
+        raw_files = TableFilesFactory.create_batch(raw_cnt, table=table, file_type=TableFiles.FILE_TYPE_RAW, date=120)
+        backup_files = TableFilesFactory.create_batch(backup_cnt, table=table, file_type=TableFiles.FILE_TYPE_BACKUP, date=110)
+        index_files = TableFilesFactory.create_batch(index_cnt, table=table, file_type=TableFiles.FILE_TYPE_INDEX, date=110)
+        new_index_files = TableFilesFactory.create_batch(new_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_INDEX, date=110)
+        new_merge_files = TableFilesFactory.create_batch(new_merge_cnt, table=table, file_type=TableFiles.FILE_TYPE_NEW_MERGE, date=110)
+        to_delete_files = TableFilesFactory.create_batch(to_delete_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_DELETE, date=110)
+        assert table.files_to_search().count() == raw_cnt + index_cnt + to_index_cnt
+
+        assert table.files_to_search([(100, 115)]).count() == index_cnt + to_index_cnt
+        assert table.files_to_search([(111, 120)]).count() == 0
+        assert table.files_to_search([(111, 121)]).count() == raw_cnt
+        assert table.files_to_search([(110, 121)]).count() == raw_cnt + index_cnt + to_index_cnt
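The tests below exercise the full client path; for reference, a direct pymilvus call against a running mishards instance looks roughly like this (host and port are placeholders, and topk/nprobe must stay within (0, 2048] per ServiceHandler):

    from milvus import Milvus

    client = Milvus()
    client.connect(host='127.0.0.1', port=19531)
    status, results = client.search_vectors(table_name='demo_table',
                                            query_records=[[0.1] * 256],
                                            top_k=10,
                                            nprobe=16)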
diff --git a/shards/mishards/test_server.py b/shards/mishards/test_server.py
new file mode 100644
index 0000000000..efd3912076
--- /dev/null
+++ b/shards/mishards/test_server.py
@@ -0,0 +1,279 @@
+import logging
+import pytest
+import mock
+import datetime
+import random
+import faker
+import inspect
+from milvus import Milvus
+from milvus.client.types import Status, IndexType, MetricType
+from milvus.client.abstract import IndexParam, TableSchema
+from milvus.grpc_gen import status_pb2, milvus_pb2
+from mishards import db, create_app, settings
+from mishards.service_handler import ServiceHandler
+from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
+from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables
+from mishards.routings import RouterMixin
+
+logger = logging.getLogger(__name__)
+
+OK = Status(code=Status.SUCCESS, message='Success')
+BAD = Status(code=Status.PERMISSION_DENIED, message='Fail')
+
+
+@pytest.mark.usefixtures('started_app')
+class TestServer:
+    @property
+    def client(self):
+        m = Milvus()
+        m.connect(host='localhost', port=settings.SERVER_TEST_PORT)
+        return m
+
+    def test_server_start(self, started_app):
+        assert started_app.conn_mgr.metas.get('WOSERVER') == settings.WOSERVER
+
+    def test_cmd(self, started_app):
+        ServiceHandler._get_server_version = mock.MagicMock(return_value=(OK,
+                                                                          ''))
+        status, _ = self.client.server_version()
+        assert status.OK()
+
+        Parser.parse_proto_Command = mock.MagicMock(return_value=(BAD, 'cmd'))
+        status, _ = self.client.server_version()
+        assert not status.OK()
+
+    def test_drop_index(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        ServiceHandler._drop_index = mock.MagicMock(return_value=OK)
+        status = self.client.drop_index(table_name)
+        assert status.OK()
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(BAD, table_name))
+        status = self.client.drop_index(table_name)
+        assert not status.OK()
+
+    def test_describe_index(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        index_type = IndexType.FLAT
+        nlist = 1
+        index_param = IndexParam(table_name=table_name,
+                                 index_type=index_type,
+                                 nlist=nlist)
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(OK, table_name))
+        ServiceHandler._describe_index = mock.MagicMock(
+            return_value=(OK, index_param))
+        status, ret = self.client.describe_index(table_name)
+        assert status.OK()
+        assert ret._table_name == index_param._table_name
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(BAD, table_name))
+        status, _ = self.client.describe_index(table_name)
+        assert not status.OK()
+
+    def test_preload(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(OK, table_name))
+        ServiceHandler._preload_table = mock.MagicMock(return_value=OK)
+        status = self.client.preload_table(table_name)
+        assert status.OK()
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(BAD, table_name))
+        status = self.client.preload_table(table_name)
+        assert not status.OK()
+    @pytest.mark.skip
+    def test_delete_by_range(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+
+        unpacked = table_name, datetime.datetime.today(
+        ), datetime.datetime.today()
+
+        Parser.parse_proto_DeleteByRangeParam = mock.MagicMock(
+            return_value=(OK, unpacked))
+        ServiceHandler._delete_by_range = mock.MagicMock(return_value=OK)
+        status = self.client.delete_vectors_by_range(
+            *unpacked)
+        assert status.OK()
+
+        Parser.parse_proto_DeleteByRangeParam = mock.MagicMock(
+            return_value=(BAD, unpacked))
+        status = self.client.delete_vectors_by_range(
+            *unpacked)
+        assert not status.OK()
+
+    def test_count_table(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        count = random.randint(100, 200)
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(OK, table_name))
+        ServiceHandler._count_table = mock.MagicMock(return_value=(OK, count))
+        status, ret = self.client.get_table_row_count(table_name)
+        assert status.OK()
+        assert ret == count
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(BAD, table_name))
+        status, _ = self.client.get_table_row_count(table_name)
+        assert not status.OK()
+
+    def test_show_tables(self, started_app):
+        tables = ['t1', 't2']
+        ServiceHandler._show_tables = mock.MagicMock(return_value=(OK, tables))
+        status, ret = self.client.show_tables()
+        assert status.OK()
+        assert ret == tables
+
+    def test_describe_table(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        dimension = 128
+        nlist = 1
+        table_schema = TableSchema(table_name=table_name,
+                                   index_file_size=100,
+                                   metric_type=MetricType.L2,
+                                   dimension=dimension)
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(OK, table_schema.table_name))
+        ServiceHandler._describe_table = mock.MagicMock(
+            return_value=(OK, table_schema))
+        status, _ = self.client.describe_table(table_name)
+        assert status.OK()
+
+        ServiceHandler._describe_table = mock.MagicMock(
+            return_value=(BAD, table_schema))
+        status, _ = self.client.describe_table(table_name)
+        assert not status.OK()
+
+        Parser.parse_proto_TableName = mock.MagicMock(return_value=(BAD,
+                                                                    'cmd'))
+        status, ret = self.client.describe_table(table_name)
+        assert not status.OK()
+
+    def test_insert(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        vectors = [[random.random() for _ in range(16)] for _ in range(10)]
+        ids = [random.randint(1000000, 20000000) for _ in range(10)]
+        ServiceHandler._add_vectors = mock.MagicMock(return_value=(OK, ids))
+        status, ret = self.client.add_vectors(
+            table_name=table_name, records=vectors)
+        assert status.OK()
+        assert ids == ret
+
+    def test_create_index(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        unpacks = table_name, None
+        Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(OK,
+                                                                     unpacks))
+        ServiceHandler._create_index = mock.MagicMock(return_value=OK)
+        status = self.client.create_index(table_name=table_name)
+        assert status.OK()
+
+        Parser.parse_proto_IndexParam = mock.MagicMock(return_value=(BAD,
+                                                                     None))
+        status = self.client.create_index(table_name=table_name)
+        assert not status.OK()
+
+    def test_drop_table(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(OK, table_name))
+        ServiceHandler._delete_table = mock.MagicMock(return_value=OK)
+        status = self.client.delete_table(table_name=table_name)
+        assert status.OK()
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(BAD, table_name))
+        status = self.client.delete_table(table_name=table_name)
+        assert not status.OK()
+    def test_has_table(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(OK, table_name))
+        ServiceHandler._has_table = mock.MagicMock(return_value=(OK, True))
+        has = self.client.has_table(table_name=table_name)
+        assert has
+
+        Parser.parse_proto_TableName = mock.MagicMock(
+            return_value=(BAD, table_name))
+        status, has = self.client.has_table(table_name=table_name)
+        assert not status.OK()
+        assert not has
+
+    def test_create_table(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        dimension = 128
+        table_schema = dict(table_name=table_name,
+                            index_file_size=100,
+                            metric_type=MetricType.L2,
+                            dimension=dimension)
+
+        ServiceHandler._create_table = mock.MagicMock(return_value=OK)
+        status = self.client.create_table(table_schema)
+        assert status.OK()
+
+        Parser.parse_proto_TableSchema = mock.MagicMock(return_value=(BAD,
+                                                                      None))
+        status = self.client.create_table(table_schema)
+        assert not status.OK()
+
+    def random_data(self, n, dimension):
+        return [[random.random() for _ in range(dimension)] for _ in range(n)]
+
+    def test_search(self, started_app):
+        table_name = inspect.currentframe().f_code.co_name
+        to_index_cnt = random.randint(10, 20)
+        table = TablesFactory(table_id=table_name, state=Tables.NORMAL)
+        to_index_files = TableFilesFactory.create_batch(
+            to_index_cnt, table=table, file_type=TableFiles.FILE_TYPE_TO_INDEX)
+        topk = random.randint(5, 10)
+        nq = random.randint(5, 10)
+        param = {
+            'table_name': table_name,
+            'query_records': self.random_data(nq, table.dimension),
+            'top_k': topk,
+            'nprobe': 2049
+        }
+
+        result = [
+            milvus_pb2.TopKQueryResult(query_result_arrays=[
+                milvus_pb2.QueryResult(id=i, distance=random.random())
+                for i in range(topk)
+            ]) for i in range(nq)
+        ]
+
+        mock_results = milvus_pb2.TopKQueryResultList(status=status_pb2.Status(
+            error_code=status_pb2.SUCCESS, reason="Success"),
+            topk_query_result=result)
+
+        table_schema = TableSchema(table_name=table_name,
+                                   index_file_size=table.index_file_size,
+                                   metric_type=table.metric_type,
+                                   dimension=table.dimension)
+
+        status, _ = self.client.search_vectors(**param)
+        assert status.code == Status.ILLEGAL_ARGUMENT
+
+        param['nprobe'] = 2048
+        RouterMixin.connection = mock.MagicMock(return_value=Milvus())
+        RouterMixin.query_conn = mock.MagicMock(return_value=Milvus())
+        Milvus.describe_table = mock.MagicMock(return_value=(BAD,
+                                                             table_schema))
+        status, ret = self.client.search_vectors(**param)
+        assert status.code == Status.TABLE_NOT_EXISTS
+
+        Milvus.describe_table = mock.MagicMock(return_value=(OK, table_schema))
+        Milvus.search_vectors_in_files = mock.MagicMock(
+            return_value=mock_results)
+
+        status, ret = self.client.search_vectors(**param)
+        assert status.OK()
+        assert len(ret) == nq
diff --git a/shards/mishards/utilities.py b/shards/mishards/utilities.py
new file mode 100644
index 0000000000..42e982b5f1
--- /dev/null
+++ b/shards/mishards/utilities.py
@@ -0,0 +1,20 @@
+import datetime
+from mishards import exceptions
+
+
+def format_date(start, end):
+    return ((start.year - 1900) * 10000 + (start.month - 1) * 100 + start.day,
+            (end.year - 1900) * 10000 + (end.month - 1) * 100 + end.day)
+
+
+def range_to_date(range_obj, metadata=None):
+    try:
+        start = datetime.datetime.strptime(range_obj.start_date, '%Y-%m-%d')
+        end = datetime.datetime.strptime(range_obj.end_date, '%Y-%m-%d')
+        assert start < end
+    except (ValueError, AssertionError):
+        raise exceptions.InvalidRangeError('Invalid time range: {} {}'.format(
+            range_obj.start_date, range_obj.end_date),
+            metadata=metadata)
+
+    return format_date(start, end)
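A quick illustration of range_to_date; the stand-in object is hypothetical, the real caller passes a pymilvus Range:

    from types import SimpleNamespace
    from mishards.utilities import range_to_date

    r = SimpleNamespace(start_date='2019-09-01', end_date='2019-09-17')
    assert range_to_date(r) == (1190801, 1190817)   # packed (start, end) ints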
diff --git a/shards/requirements.txt b/shards/requirements.txt
new file mode 100644
index 0000000000..ae224e92ed
--- /dev/null
+++ b/shards/requirements.txt
@@ -0,0 +1,36 @@
+environs==4.2.0
+factory-boy==2.12.0
+Faker==1.0.7
+fire==0.1.3
+google-auth==1.6.3
+grpcio==1.22.0
+grpcio-tools==1.22.0
+kubernetes==10.0.1
+MarkupSafe==1.1.1
+marshmallow==2.19.5
+pymysql==0.9.3
+protobuf==3.9.1
+py==1.8.0
+pyasn1==0.4.7
+pyasn1-modules==0.2.6
+pylint==2.3.1
+pymilvus-test==0.2.28
+#pymilvus==0.2.0
+pyparsing==2.4.0
+pytest==4.6.3
+pytest-level==0.1.1
+pytest-print==0.1.2
+pytest-repeat==0.8.0
+pytest-timeout==1.3.3
+python-dateutil==2.8.0
+python-dotenv==0.10.3
+pytz==2019.1
+requests==2.22.0
+requests-oauthlib==1.2.0
+rsa==4.0
+six==1.12.0
+SQLAlchemy==1.3.5
+urllib3==1.25.3
+jaeger-client>=3.4.0
+grpcio-opentracing>=1.0
+mock==2.0.0
diff --git a/shards/sd/__init__.py b/shards/sd/__init__.py
new file mode 100644
index 0000000000..7943887d0f
--- /dev/null
+++ b/shards/sd/__init__.py
@@ -0,0 +1,28 @@
+import logging
+import inspect
+# from utils import singleton
+
+logger = logging.getLogger(__name__)
+
+
+class ProviderManager:
+    PROVIDERS = {}
+
+    @classmethod
+    def register_service_provider(cls, target):
+        if inspect.isfunction(target):
+            cls.PROVIDERS[target.__name__] = target
+        elif inspect.isclass(target):
+            name = target.__dict__.get('NAME', None)
+            name = name if name else target.__name__
+            cls.PROVIDERS[name] = target
+        else:
+            assert False, 'Cannot register_service_provider for: {}'.format(target)
+        return target
+
+    @classmethod
+    def get_provider(cls, name):
+        return cls.PROVIDERS.get(name, None)
+
+
+from sd import kubernetes_provider, static_provider
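Service-discovery backends plug into the registry above; a hypothetical fixed-host provider for illustration:

    from sd import ProviderManager

    @ProviderManager.register_service_provider
    class DummyProvider:
        NAME = 'Dummy'

        def __init__(self, settings, conn_mgr, **kwargs):
            self.conn_mgr = conn_mgr

        def start(self):
            self.conn_mgr.register('dummy', 'tcp://127.0.0.1:19530')

        def stop(self):
            self.conn_mgr.unregister('dummy')

    assert ProviderManager.get_provider('Dummy') is DummyProvider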
diff --git a/shards/sd/kubernetes_provider.py b/shards/sd/kubernetes_provider.py
new file mode 100644
index 0000000000..eb113db007
--- /dev/null
+++ b/shards/sd/kubernetes_provider.py
@@ -0,0 +1,331 @@
+import os
+import sys
+if __name__ == '__main__':
+    sys.path.append(os.path.dirname(os.path.dirname(
+        os.path.abspath(__file__))))
+
+import re
+import logging
+import time
+import copy
+import threading
+import queue
+import enum
+from kubernetes import client, config, watch
+
+from utils import singleton
+from sd import ProviderManager
+
+logger = logging.getLogger(__name__)
+
+INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
+
+
+class EventType(enum.Enum):
+    PodHeartBeat = 1
+    Watch = 2
+
+
+class K8SMixin:
+    def __init__(self, namespace, in_cluster=False, **kwargs):
+        self.namespace = namespace
+        self.in_cluster = in_cluster
+        self.kwargs = kwargs
+        self.v1 = kwargs.get('v1', None)
+        if not self.namespace:
+            self.namespace = open(INCLUSTER_NAMESPACE_PATH).read()
+
+        if not self.v1:
+            config.load_incluster_config(
+            ) if self.in_cluster else config.load_kube_config()
+            self.v1 = client.CoreV1Api()
+
+
+class K8SHeartbeatHandler(threading.Thread, K8SMixin):
+    def __init__(self,
+                 message_queue,
+                 namespace,
+                 label_selector,
+                 in_cluster=False,
+                 **kwargs):
+        K8SMixin.__init__(self,
+                          namespace=namespace,
+                          in_cluster=in_cluster,
+                          **kwargs)
+        threading.Thread.__init__(self)
+        self.queue = message_queue
+        self.terminate = False
+        self.label_selector = label_selector
+        self.poll_interval = kwargs.get('poll_interval', 5)
+
+    def run(self):
+        while not self.terminate:
+            try:
+                pods = self.v1.list_namespaced_pod(
+                    namespace=self.namespace,
+                    label_selector=self.label_selector)
+                event_message = {'eType': EventType.PodHeartBeat, 'events': []}
+                for item in pods.items:
+                    pod = self.v1.read_namespaced_pod(name=item.metadata.name,
+                                                      namespace=self.namespace)
+                    name = pod.metadata.name
+                    ip = pod.status.pod_ip
+                    phase = pod.status.phase
+                    reason = pod.status.reason
+                    message = pod.status.message
+                    ready = True if phase == 'Running' else False
+
+                    pod_event = dict(pod=name,
+                                     ip=ip,
+                                     ready=ready,
+                                     reason=reason,
+                                     message=message)
+
+                    event_message['events'].append(pod_event)
+
+                self.queue.put(event_message)
+
+            except Exception as exc:
+                logger.error(exc)
+
+            time.sleep(self.poll_interval)
+
+    def stop(self):
+        self.terminate = True
+
+
+class K8SEventListener(threading.Thread, K8SMixin):
+    def __init__(self, message_queue, namespace, in_cluster=False, **kwargs):
+        K8SMixin.__init__(self,
+                          namespace=namespace,
+                          in_cluster=in_cluster,
+                          **kwargs)
+        threading.Thread.__init__(self)
+        self.queue = message_queue
+        self.terminate = False
+        self.at_start_up = True
+        self._stop_event = threading.Event()
+
+    def stop(self):
+        self.terminate = True
+        self._stop_event.set()
+
+    def run(self):
+        resource_version = ''
+        w = watch.Watch()
+        for event in w.stream(self.v1.list_namespaced_event,
+                              namespace=self.namespace,
+                              field_selector='involvedObject.kind=Pod'):
+            if self.terminate:
+                break
+
+            resource_version = int(event['object'].metadata.resource_version)
+
+            info = dict(
+                eType=EventType.Watch,
+                pod=event['object'].involved_object.name,
+                reason=event['object'].reason,
+                message=event['object'].message,
+                start_up=self.at_start_up,
+            )
+            self.at_start_up = False
+            # logger.info('Received event: {}'.format(info))
+            self.queue.put(info)
+
+
+class EventHandler(threading.Thread):
+    def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs):
+        threading.Thread.__init__(self)
+        self.mgr = mgr
+        self.queue = message_queue
+        self.kwargs = kwargs
+        self.terminate = False
+        self.pod_patt = re.compile(pod_patt)
+        self.namespace = namespace
+
+    def stop(self):
+        self.terminate = True
+
+    def on_drop(self, event, **kwargs):
+        pass
+
+    def on_pod_started(self, event, **kwargs):
+        try_cnt = 3
+        pod = None
+        while try_cnt > 0:
+            try_cnt -= 1
+            try:
+                pod = self.mgr.v1.read_namespaced_pod(name=event['pod'],
+                                                      namespace=self.namespace)
+                if not pod.status.pod_ip:
+                    time.sleep(0.5)
+                    continue
+                break
+            except client.rest.ApiException as exc:
+                time.sleep(0.5)
+
+        if try_cnt <= 0 and not pod:
+            if not event['start_up']:
+                logger.error('Pod {} is started but cannot read pod'.format(
+                    event['pod']))
+            return
+        elif try_cnt <= 0 and not pod.status.pod_ip:
+            logger.warning('NoPodIPFoundError')
+            return
+
+        logger.info('Register POD {} with IP {}'.format(
+            pod.metadata.name, pod.status.pod_ip))
+        self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip)
+
+    def on_pod_killing(self, event, **kwargs):
+        logger.info('Unregister POD {}'.format(event['pod']))
+        self.mgr.delete_pod(name=event['pod'])
+
+    def on_pod_heartbeat(self, event, **kwargs):
+        names = self.mgr.conn_mgr.conn_names
+
+        running_names = set()
+        for each_event in event['events']:
+            if each_event['ready']:
+                self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip'])
+                running_names.add(each_event['pod'])
+            else:
+                self.mgr.delete_pod(name=each_event['pod'])
+
+        to_delete = names - running_names
+        for name in to_delete:
+            self.mgr.delete_pod(name)
+
+        logger.info(self.mgr.conn_mgr.conn_names)
+
+    def handle_event(self, event):
+        if event['eType'] == EventType.PodHeartBeat:
+            return self.on_pod_heartbeat(event)
+
+        if not event or (event['reason'] not in ('Started', 'Killing')):
+            return self.on_drop(event)
+
+        if not re.match(self.pod_patt, event['pod']):
+            return self.on_drop(event)
+
+        logger.info('Handling event: {}'.format(event))
+
+        if event['reason'] == 'Started':
+            return self.on_pod_started(event)
+
+        return self.on_pod_killing(event)
self.terminate: + try: + event = self.queue.get(timeout=1) + self.handle_event(event) + except queue.Empty: + continue + + +class KubernetesProviderSettings: + def __init__(self, namespace, pod_patt, label_selector, in_cluster, + poll_interval, port=None, **kwargs): + self.namespace = namespace + self.pod_patt = pod_patt + self.label_selector = label_selector + self.in_cluster = in_cluster + self.poll_interval = poll_interval + self.port = int(port) if port else 19530 + + +@singleton +@ProviderManager.register_service_provider +class KubernetesProvider(object): + NAME = 'Kubernetes' + + def __init__(self, settings, conn_mgr, **kwargs): + self.namespace = settings.namespace + self.pod_patt = settings.pod_patt + self.label_selector = settings.label_selector + self.in_cluster = settings.in_cluster + self.poll_interval = settings.poll_interval + self.port = settings.port + self.kwargs = kwargs + self.queue = queue.Queue() + + self.conn_mgr = conn_mgr + + if not self.namespace: + self.namespace = open(incluster_namespace_path).read() + + config.load_incluster_config( + ) if self.in_cluster else config.load_kube_config() + self.v1 = client.CoreV1Api() + + self.listener = K8SEventListener(message_queue=self.queue, + namespace=self.namespace, + in_cluster=self.in_cluster, + v1=self.v1, + **kwargs) + + self.pod_heartbeater = K8SHeartbeatHandler( + message_queue=self.queue, + namespace=self.namespace, + label_selector=self.label_selector, + in_cluster=self.in_cluster, + v1=self.v1, + poll_interval=self.poll_interval, + **kwargs) + + self.event_handler = EventHandler(mgr=self, + message_queue=self.queue, + namespace=self.namespace, + pod_patt=self.pod_patt, + **kwargs) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) + + def start(self): + self.listener.daemon = True + self.listener.start() + self.event_handler.start() + + self.pod_heartbeater.start() + + def stop(self): + self.listener.stop() + self.pod_heartbeater.stop() + self.event_handler.stop() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + + class Connect: + def register(self, name, value): + logger.error('Register: {} - {}'.format(name, value)) + + def unregister(self, name): + logger.error('Unregister: {}'.format(name)) + + @property + def conn_names(self): + return set() + + connect_mgr = Connect() + + settings = KubernetesProviderSettings(namespace='xp', + pod_patt=".*-ro-servers-.*", + label_selector='tier=ro-servers', + poll_interval=5, + in_cluster=False) + + provider_class = ProviderManager.get_provider('Kubernetes') + t = provider_class(conn_mgr=connect_mgr, settings=settings) + t.start() + cnt = 100 + while cnt > 0: + time.sleep(2) + cnt -= 1 + t.stop() diff --git a/shards/sd/static_provider.py b/shards/sd/static_provider.py new file mode 100644 index 0000000000..e88780740f --- /dev/null +++ b/shards/sd/static_provider.py @@ -0,0 +1,39 @@ +import os +import sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import socket +from utils import singleton +from sd import ProviderManager + + +class StaticProviderSettings: + def __init__(self, hosts, port=None): + self.hosts = hosts + self.port = int(port) if port else 19530 + + +@singleton +@ProviderManager.register_service_provider +class KubernetesProvider(object): + NAME = 'Static' + + def __init__(self, settings, conn_mgr, **kwargs): + self.conn_mgr = conn_mgr + self.hosts = 
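EventHandler.run above is a standard queue-consumer loop; the one-second timeout on get() is what keeps the thread responsive to its stop flag. A self-contained sketch of the same pattern:

```
import queue
import threading

# Minimal sketch of the consumer loop EventHandler.run uses above: blocking
# get() with a 1s timeout, so the stop flag is re-checked at least once a second.
q = queue.Queue()
stop = threading.Event()

def consume():
    while not stop.is_set():
        try:
            event = q.get(timeout=1)
        except queue.Empty:
            continue
        print('handled', event)

t = threading.Thread(target=consume)
t.start()
q.put({'eType': 'PodHeartBeat', 'events': []})
stop.set()
t.join()
```

The timeout, rather than an unconditional blocking get(), is the design choice that makes stop() effective without needing a sentinel item in the queue.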
[socket.gethostbyname(host) for host in settings.hosts] + self.port = settings.port + + def start(self): + for host in self.hosts: + self.add_pod(host, host) + + def stop(self): + for host in self.hosts: + self.delete_pod(host) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) diff --git a/shards/setup.cfg b/shards/setup.cfg new file mode 100644 index 0000000000..4a88432914 --- /dev/null +++ b/shards/setup.cfg @@ -0,0 +1,4 @@ +[tool:pytest] +testpaths = mishards +log_cli=true +log_cli_level=info diff --git a/shards/start_services.yml b/shards/start_services.yml new file mode 100644 index 0000000000..57fe061bb7 --- /dev/null +++ b/shards/start_services.yml @@ -0,0 +1,45 @@ +version: "2.3" +services: + milvus: + runtime: nvidia + restart: always + image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de + # ports: + # - "0.0.0.0:19530:19530" + volumes: + - /tmp/milvus/db:/opt/milvus/db + + jaeger: + restart: always + image: jaegertracing/all-in-one:1.14 + ports: + - "0.0.0.0:5775:5775/udp" + - "0.0.0.0:16686:16686" + - "0.0.0.0:9441:9441" + environment: + COLLECTOR_ZIPKIN_HTTP_PORT: 9411 + + mishards: + restart: always + image: registry.zilliz.com/milvus/mishards:v0.0.4 + ports: + - "0.0.0.0:19530:19531" + - "0.0.0.0:19532:19532" + volumes: + - /tmp/milvus/db:/tmp/milvus/db + # - /tmp/mishards_env:/source/mishards/.env + command: ["python", "mishards/main.py"] + environment: + FROM_EXAMPLE: 'true' + DEBUG: 'true' + SERVER_PORT: 19531 + WOSERVER: tcp://milvus:19530 + SD_STATIC_HOSTS: milvus + TRACING_TYPE: jaeger + TRACING_SERVICE_NAME: mishards-demo + TRACING_REPORTING_HOST: jaeger + TRACING_REPORTING_PORT: 5775 + + depends_on: + - milvus + - jaeger diff --git a/shards/tracing/__init__.py b/shards/tracing/__init__.py new file mode 100644 index 0000000000..64a5b50d15 --- /dev/null +++ b/shards/tracing/__init__.py @@ -0,0 +1,43 @@ +from contextlib import contextmanager + + +def empty_server_interceptor_decorator(target_server, interceptor): + return target_server + + +@contextmanager +def EmptySpan(*args, **kwargs): + yield None + return + + +class Tracer: + def __init__(self, + tracer=None, + interceptor=None, + server_decorator=empty_server_interceptor_decorator): + self.tracer = tracer + self.interceptor = interceptor + self.server_decorator = server_decorator + + def decorate(self, server): + return self.server_decorator(server, self.interceptor) + + @property + def empty(self): + return self.tracer is None + + def close(self): + self.tracer and self.tracer.close() + + def start_span(self, + operation_name=None, + child_of=None, + references=None, + tags=None, + start_time=None, + ignore_active_span=False): + if self.empty: + return EmptySpan() + return self.tracer.start_span(operation_name, child_of, references, + tags, start_time, ignore_active_span) diff --git a/shards/tracing/factory.py b/shards/tracing/factory.py new file mode 100644 index 0000000000..14fcde2eb3 --- /dev/null +++ b/shards/tracing/factory.py @@ -0,0 +1,40 @@ +import logging +from jaeger_client import Config +from grpc_opentracing.grpcext import intercept_server +from grpc_opentracing import open_tracing_server_interceptor + +from tracing import (Tracer, empty_server_interceptor_decorator) + +logger = logging.getLogger(__name__) + + +class TracerFactory: + @classmethod + def new_tracer(cls, + tracer_type, + tracer_config, + span_decorator=None, + **kwargs): + if not tracer_type: + return 
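The tracing package above leans on a null-object pattern: when no real tracer is configured, start_span still hands back a context manager, so call sites never have to branch on whether tracing is enabled. A minimal sketch, assuming callers tolerate a None span:

```
from contextlib import contextmanager

# Sketch of the null-object pattern Tracer relies on above: with no tracer
# configured, spans are no-ops but remain usable in a `with` block.
@contextmanager
def empty_span(*args, **kwargs):
    yield None

with empty_span(operation_name='search') as span:
    assert span is None  # calling code must tolerate the no-op span
```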
Tracer() + config = tracer_config.TRACING_CONFIG + service_name = tracer_config.TRACING_SERVICE_NAME + validate = tracer_config.TRACING_VALIDATE + # if not tracer_type: + # tracer_type = 'jaeger' + # config = tracer_config.DEFAULT_TRACING_CONFIG + + if tracer_type.lower() == 'jaeger': + config = Config(config=config, + service_name=service_name, + validate=validate) + + tracer = config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor( + tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) + + return Tracer(tracer, tracer_interceptor, intercept_server) + + assert False, 'Unsupported tracer type: {}'.format(tracer_type) diff --git a/shards/utils/__init__.py b/shards/utils/__init__.py new file mode 100644 index 0000000000..c1d55e76c0 --- /dev/null +++ b/shards/utils/__init__.py @@ -0,0 +1,11 @@ +from functools import wraps + + +def singleton(cls): + instances = {} + @wraps(cls) + def getinstance(*args, **kw): + if cls not in instances: + instances[cls] = cls(*args, **kw) + return instances[cls] + return getinstance diff --git a/shards/utils/logger_helper.py b/shards/utils/logger_helper.py new file mode 100644 index 0000000000..b4e3b9c5b6 --- /dev/null +++ b/shards/utils/logger_helper.py @@ -0,0 +1,152 @@ +import os +import datetime +from pytz import timezone +from logging import Filter +import logging.config + + +class InfoFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.INFO + + +class DebugFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.DEBUG + + +class WarnFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.WARN + + +class ErrorFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.ERROR + + +class CriticalFilter(logging.Filter): + def filter(self, rec): + return rec.levelno == logging.CRITICAL + + +COLORS = { + 'HEADER': '\033[95m', + 'INFO': '\033[92m', + 'DEBUG': '\033[94m', + 'WARNING': '\033[93m', + 'ERROR': '\033[95m', + 'CRITICAL': '\033[91m', + 'ENDC': '\033[0m', +} + + +class ColorFulFormatColMixin: + def format_col(self, message_str, level_name): + if level_name in COLORS.keys(): + message_str = COLORS.get(level_name) + message_str + COLORS.get( + 'ENDC') + return message_str + + +class ColorfulFormatter(logging.Formatter, ColorFulFormatColMixin): + def format(self, record): + message_str = super(ColorfulFormatter, self).format(record) + + return self.format_col(message_str, level_name=record.levelname) + + +def config(log_level, log_path, name, tz='UTC'): + def build_log_file(level, log_path, name, tz): + utc_now = datetime.datetime.utcnow() + utc_tz = timezone('UTC') + local_tz = timezone(tz) + tznow = utc_now.replace(tzinfo=utc_tz).astimezone(local_tz) + return '{}-{}-{}.log'.format(os.path.join(log_path, name), tznow.strftime("%m-%d-%Y-%H:%M:%S"), + level) + + if not os.path.exists(log_path): + os.makedirs(log_path) + + LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'default': { + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', + }, + 'colorful_console': { + 'format': '%(asctime)s | %(levelname)s | %(name)s | %(threadName)s: %(message)s (%(filename)s:%(lineno)s)', + '()': ColorfulFormatter, + }, + }, + 'filters': { + 'InfoFilter': { + '()': InfoFilter, + }, + 'DebugFilter': { + '()': DebugFilter, + }, + 'WarnFilter': { + '()': WarnFilter, + }, + 'ErrorFilter': { + '()': ErrorFilter, + }, + 
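Each filter above accepts exactly one level, which is how the configuration routes every level to its own rotating file. A runnable sketch of the same idea with an invented logger name:

```
import logging

# Level-exact filtering, as in the InfoFilter/WarnFilter classes above:
# a filter that returns True only for one level makes a handler own that level.
class InfoOnly(logging.Filter):
    def filter(self, rec):
        return rec.levelno == logging.INFO

handler = logging.StreamHandler()
handler.addFilter(InfoOnly())

demo = logging.getLogger('filter-demo')
demo.addHandler(handler)
demo.setLevel(logging.DEBUG)
demo.info('passes the filter')
demo.warning('dropped by the filter')
```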
'CriticalFilter': { + '()': CriticalFilter, + }, + }, + 'handlers': { + 'milvus_celery_console': { + 'class': 'logging.StreamHandler', + 'formatter': 'colorful_console', + }, + 'milvus_debug_file': { + 'level': 'DEBUG', + 'filters': ['DebugFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('debug', log_path, name, tz) + }, + 'milvus_info_file': { + 'level': 'INFO', + 'filters': ['InfoFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('info', log_path, name, tz) + }, + 'milvus_warn_file': { + 'level': 'WARN', + 'filters': ['WarnFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('warn', log_path, name, tz) + }, + 'milvus_error_file': { + 'level': 'ERROR', + 'filters': ['ErrorFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('error', log_path, name, tz) + }, + 'milvus_critical_file': { + 'level': 'CRITICAL', + 'filters': ['CriticalFilter'], + 'class': 'logging.handlers.RotatingFileHandler', + 'formatter': 'default', + 'filename': build_log_file('critical', log_path, name, tz) + }, + }, + 'loggers': { + '': { + 'handlers': ['milvus_celery_console', 'milvus_info_file', 'milvus_debug_file', 'milvus_warn_file', + 'milvus_error_file', 'milvus_critical_file'], + 'level': log_level, + 'propagate': False + }, + }, + 'propagate': False, + } + + logging.config.dictConfig(LOGGING) From f89e5cfc4e912600fcfc3209cc4b6c08602e6193 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 25 Oct 2019 10:20:37 +0800 Subject: [PATCH 092/196] update manager --- shards/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/shards/manager.py b/shards/manager.py index 931c90ebc8..d9d303d2df 100644 --- a/shards/manager.py +++ b/shards/manager.py @@ -1,6 +1,6 @@ import fire -from mishards import db from sqlalchemy import and_ +from mishards import db, settings class DBHandler: @@ -25,4 +25,5 @@ class DBHandler: if __name__ == '__main__': + db.init_db(settings.DefaultConfig.SQLALCHEMY_DATABASE_URI) fire.Fire(DBHandler) From de25bd08a859749b4b6295a48f3d027982f8b241 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 25 Oct 2019 14:32:08 +0800 Subject: [PATCH 093/196] update manager --- shards/manager.py | 1 + 1 file changed, 1 insertion(+) diff --git a/shards/manager.py b/shards/manager.py index d9d303d2df..666ddd377e 100644 --- a/shards/manager.py +++ b/shards/manager.py @@ -26,4 +26,5 @@ class DBHandler: if __name__ == '__main__': db.init_db(settings.DefaultConfig.SQLALCHEMY_DATABASE_URI) + from mishards import models fire.Fire(DBHandler) From 0a172fc8f6a98d9570c2f1a8cfafc1687bf6ea58 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 25 Oct 2019 14:32:23 +0800 Subject: [PATCH 094/196] fix conftest --- shards/conftest.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/shards/conftest.py b/shards/conftest.py index 34e22af693..24ec19da1d 100644 --- a/shards/conftest.py +++ b/shards/conftest.py @@ -1,10 +1,19 @@ +import os import logging import pytest import grpc +import tempfile +import shutil from mishards import settings, db, create_app logger = logging.getLogger(__name__) +tpath = tempfile.mkdtemp() +dirpath = '{}/db'.format(tpath) +filepath = '{}/meta.sqlite'.format(dirpath) +os.makedirs(dirpath, 0o777) +settings.TestingConfig.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}?check_same_thread=False'.format( + filepath) @pytest.fixture 
def app(request):
@@ -15,6 +24,7 @@ def app(request):
     yield app
 
     db.drop_all()
+    # shutil.rmtree(tpath)
 
 
 @pytest.fixture
From 822a9c960b882b722d33befc16daf90b0f2aaf07 Mon Sep 17 00:00:00 2001
From: "peng.xu" 
Date: Fri, 25 Oct 2019 14:32:23 +0800
Subject: [PATCH 095/196] fix conftest

fix conftest
---
 shards/conftest.py                 | 1 +
 shards/mishards/service_handler.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/shards/conftest.py b/shards/conftest.py
index 24ec19da1d..aa4d409979 100644
--- a/shards/conftest.py
+++ b/shards/conftest.py
@@ -15,6 +15,7 @@ os.makedirs(dirpath, 0o777)
 settings.TestingConfig.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}?check_same_thread=False'.format(
     filepath)
 
+
 @pytest.fixture
 def app(request):
     app = create_app(settings.TestingConfig)
diff --git a/shards/mishards/service_handler.py b/shards/mishards/service_handler.py
index 5e91c14f14..2f19152ae6 100644
--- a/shards/mishards/service_handler.py
+++ b/shards/mishards/service_handler.py
@@ -171,7 +171,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
         logger.info('HasTable {}'.format(_table_name))
 
         _status, _bool = self._has_table(_table_name,
-                metadata={'resp_class': milvus_pb2.BoolReply})
+                                         metadata={'resp_class': milvus_pb2.BoolReply})
 
         return milvus_pb2.BoolReply(status=status_pb2.Status(
             error_code=_status.code, reason=_status.message),
From c73d1d8342e8832a3c0ca13eee17bf7e3a717570 Mon Sep 17 00:00:00 2001
From: "peng.xu" 
Date: Fri, 25 Oct 2019 14:44:39 +0800
Subject: [PATCH 096/196] update CN tutorial
---
 shards/Tutorial_CN.md | 131 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 131 insertions(+)
 create mode 100644 shards/Tutorial_CN.md

diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
new file mode 100644
index 0000000000..74ddd5ef78
--- /dev/null
+++ b/shards/Tutorial_CN.md
@@ -0,0 +1,131 @@
+# Mishards User Guide
+---
+Milvus is designed to help users run approximate search and analytics over massive unstructured data. A single Milvus instance can handle billion-scale datasets; for tens or hundreds of billions of vectors, a Milvus cluster instance is required. Upper-layer applications can use such a cluster exactly like a standalone instance while still getting low latency and high concurrency on massive data. Mishards is that cluster middleware: internally it handles request forwarding, read/write splitting, horizontal scaling and dynamic scale-out, presenting users with a Milvus instance whose memory and compute capacity can grow without limit.
+
+## Runtime environment
+---
+
+### Quick start on a single machine
+**Requires a `python >= 3.4` environment**
+
+```
+1. cd milvus/shards
+2. pip install -r requirements.txt
+3. nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
+4. sudo chown -R $USER:$USER /tmp/milvus
+5. cp mishards/.env.example mishards/.env
+6. python mishards/main.py  # .env configures mishards to listen on port 19532
+```
+
+### Quick start with containers
+`all_in_one` starts two Milvus instances, one mishards instance and one Jaeger tracing instance on the server.
+
+**Start**
+```
+1. install docker-compose
+2. cd milvus/shards/all_in_one
+3. docker-compose -f all_in_one.yml up -d  # listens on port 19531
+```
+
+**Open the Jaeger UI**
+```
+Open "http://127.0.0.1:16686/" in a browser
+```
+
+### Quick start on Kubernetes
+**Prerequisites**
+```
+- a Kubernetes cluster
+- nvidia-docker installed
+- shared storage
+- kubectl installed and able to access the cluster
+```
+
+**Steps**
+```
+1. cd milvus/shards/kubernetes_demo/
+2. ./start.sh allup
+3. watch -n 1 kubectl get pods -n milvus -o wide  # watch pod status; wait until all pods are Running
+4. kubectl get service -n milvus  # the EXTERNAL-IP and PORT of milvus-proxy-servers form the service address of the mishards cluster
+```
+
+**Scale out compute instances**
+```
+./start.sh scale-ro-server 2  # scale compute instances to 2
+```
+
+**Scale out proxy instances**
+```
+./start.sh scale-proxy 2  # scale proxy server instances to 2
+```
+
+**View logs**
+```
+kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0  # logs of compute node milvus-ro-servers-0
+```
+
+## Testing
+
+**Run the unit tests**
+```
+1. cd milvus/shards
+2. pytest
+```
+
+**Unit test coverage**
+```
+pytest --cov-report html:cov_html --cov=mishards
+```
+
+## Mishards configuration reference
+
+### Global
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| Debug | No | bool | True | Whether to run in debug mode |
+| TIMEZONE | No | string | "UTC" | Time zone |
+| MAX_RETRY | No | int | 3 | Maximum number of connection retries |
+| SERVER_PORT | No | int | 19530 | Service port |
+| WOSERVER | **Yes** | str | - | Address of the writable backend Milvus instance. Currently only a static setting is supported, e.g. "tcp://127.0.0.1:19530" |
+
+### Metadata
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| SQLALCHEMY_DATABASE_URI | **Yes** | string | - | Address of the metadata storage database |
+| SQL_ECHO | No | bool | False | Whether to print verbose SQL statements |
+| SQLALCHEMY_DATABASE_TEST_URI | No | string | - | Metadata database address in the test environment |
+| SQL_TEST_ECHO | No | bool | False | Whether to print verbose SQL statements in the test environment |
+
+### Service discovery
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| SD_PROVIDER | No | string | "Kubernetes" | Service discovery provider; currently only Static and Kubernetes are available |
+| SD_STATIC_HOSTS | No | list | [] | When **SD_PROVIDER** is **Static**, the list of service addresses, e.g. "192.168.1.188,192.168.1.190" |
+| SD_STATIC_PORT | No | int | 19530 | When **SD_PROVIDER** is **Static**, the port the hosts listen on |
+| SD_NAMESPACE | No | string | - | When **SD_PROVIDER** is **Kubernetes**, the cluster namespace |
+| SD_IN_CLUSTER | No | bool | False | When **SD_PROVIDER** is **Kubernetes**, whether service discovery itself runs inside the cluster |
+| SD_POLL_INTERVAL | No | int | 5 | When **SD_PROVIDER** is **Kubernetes**, the polling interval of the service list, in seconds |
+| SD_ROSERVER_POD_PATT | No | string | - | When **SD_PROVIDER** is **Kubernetes**, the regular expression matching readable Milvus instances |
+| SD_LABEL_SELECTOR | No | string | - | When **SD_PROVIDER** is **Kubernetes**, the label selector matching readable Milvus instances |
+
+### Tracing
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| TRACING_TYPE | No | string | "" | Tracing backend; currently only Jaeger is supported, disabled by default |
+| TRACING_SERVICE_NAME | No | string | "mishards" | When **TRACING_TYPE** is **Jaeger**, the tracing service name |
+| TRACING_SAMPLER_TYPE | No | string | "const" | When **TRACING_TYPE** is **Jaeger**, the sampler type |
+| TRACING_SAMPLER_PARAM | No | int | 1 | When **TRACING_TYPE** is **Jaeger**, the sampling rate parameter |
+| TRACING_LOG_PAYLOAD | No | bool | False | When **TRACING_TYPE** is **Jaeger**, whether payloads are recorded |
+
+### Logging
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| LOG_LEVEL | No | string | "DEBUG" if Debug is ON else "INFO" | Log level |
+| LOG_PATH | No | string | "/tmp/mishards" | Log path |
+| LOG_NAME | No | string | "logfile" | Log file name |
+
+### Routing
+| Name | Required | Type | Default Value | Explanation |
+| --------------------------- | -------- | -------- | ------------- | ------------- |
+| ROUTER_CLASS_NAME | No | string | FileBasedHashRingRouter | Class name of the request router; custom classes can be registered |
+| ROUTER_CLASS_TEST_NAME | No | string | FileBasedHashRingRouter | Router class name in the test environment; custom classes can be registered |
From 74429c902d0404483cababf902d480d07159e7f9 Mon Sep 17 00:00:00 2001
From: "peng.xu" 
Date: Fri, 25 Oct 2019 20:24:03 +0800
Subject: [PATCH 097/196] implement plugin framework for tracer
---
 shards/mishards/__init__.py             | 15 +++++---
 shards/mishards/settings.py             |  1 +
 shards/requirements.txt                 |  1 +
 shards/{tracing => tracer}/__init__.py  |  0
 shards/tracer/factory.py                | 48 +++++++++++++++++++++++++
 shards/tracer/plugins/jaeger_factory.py | 33 +++++++++++++++++
 shards/tracing/factory.py               | 40 
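For reference, settings of the kind documented in the tables above are consumed with the `environs` package that mishards' settings.py already uses; the variable names below come from the tables, and the defaults shown are illustrative.

```
# Sketch of reading the documented variables with environs (assumed usage,
# mirroring the env.str/env.bool calls visible in settings.py).
from environs import Env

env = Env()
env.read_env()  # picks up mishards/.env when present

DEBUG = env.bool('DEBUG', False)
MAX_RETRY = env.int('MAX_RETRY', 3)
WOSERVER = env.str('WOSERVER', 'tcp://127.0.0.1:19530')  # required in production
```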
--------------------- 7 files changed, 93 insertions(+), 45 deletions(-) rename shards/{tracing => tracer}/__init__.py (100%) create mode 100644 shards/tracer/factory.py create mode 100644 shards/tracer/plugins/jaeger_factory.py delete mode 100644 shards/tracing/factory.py diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py index 7db3d8cb5e..c5ecbe93fc 100644 --- a/shards/mishards/__init__.py +++ b/shards/mishards/__init__.py @@ -19,17 +19,22 @@ def create_app(testing_config=None): from sd import ProviderManager sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) - discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, conn_mgr=connect_mgr) + discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, + conn_mgr=connect_mgr) - from tracing.factory import TracerFactory from mishards.grpc_utils import GrpcSpanDecorator - tracer = TracerFactory.new_tracer(config.TRACING_TYPE, settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) + from tracer.factory import TracerFactory + tracer = TracerFactory(config.TRACING_PLUGIN_PATH).create(config.TRACING_TYPE, + settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) from mishards.routings import RouterFactory router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) - grpc_server.init_app(conn_mgr=connect_mgr, tracer=tracer, router=router, discover=discover) + grpc_server.init_app(conn_mgr=connect_mgr, + tracer=tracer, + router=router, + discover=discover) from mishards import exception_handlers diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py index 21a3bb7a65..08550374ad 100644 --- a/shards/mishards/settings.py +++ b/shards/mishards/settings.py @@ -74,6 +74,7 @@ class TracingConfig: class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) + TRACING_PLUGIN_PATH = env.str('TRACING_PLUGIN_PATH', '') TRACING_TYPE = env.str('TRACING_TYPE', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') diff --git a/shards/requirements.txt b/shards/requirements.txt index ae224e92ed..14bdde2a06 100644 --- a/shards/requirements.txt +++ b/shards/requirements.txt @@ -34,3 +34,4 @@ urllib3==1.25.3 jaeger-client>=3.4.0 grpcio-opentracing>=1.0 mock==2.0.0 +pluginbase==1.0.0 diff --git a/shards/tracing/__init__.py b/shards/tracer/__init__.py similarity index 100% rename from shards/tracing/__init__.py rename to shards/tracer/__init__.py diff --git a/shards/tracer/factory.py b/shards/tracer/factory.py new file mode 100644 index 0000000000..7ffed32bd0 --- /dev/null +++ b/shards/tracer/factory.py @@ -0,0 +1,48 @@ +import os +import logging +from functools import partial +from pluginbase import PluginBase + + +logger = logging.getLogger(__name__) + +here = os.path.abspath(os.path.dirname(__file__)) +get_path = partial(os.path.join, here) + +PLUGIN_PACKAGE_NAME = 'tracer.plugins' +plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, + searchpath=[get_path('./plugins')]) + +class TracerFactory(object): + def __init__(self, searchpath=None): + self.plugin_package_name = PLUGIN_PACKAGE_NAME + self.tracer_map = {} + searchpath = searchpath if searchpath else [] + searchpath = [searchpath] if isinstance(searchpath, str) else searchpath + self.source = plugin_base.make_plugin_source( + searchpath=searchpath, identifier=self.__class__.__name__) + + for plugin_name in self.source.list_plugins(): + plugin = self.source.load_plugin(plugin_name) + plugin.setup(self) + + def 
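The factory above follows the standard pluginbase flow: build a PluginBase for a package name, open a plugin source over one or more search paths, then load and set up each discovered module. A minimal sketch under the assumption of a hypothetical ./plugins directory; `App` is an invented stand-in for the factory object handed to each plugin's setup() hook.

```
from pluginbase import PluginBase

# Hypothetical package name and search path, for illustration only.
plugin_base = PluginBase(package='demo.plugins')
source = plugin_base.make_plugin_source(searchpath=['./plugins'])


class App:
    plugin_package_name = 'demo.plugins'

    def on_plugin_setup(self, plugin_class):
        print('installed', plugin_class)


app = App()
for name in source.list_plugins():
    # By the convention used above, each plugin module exposes setup(app).
    source.load_plugin(name).setup(app)
```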
on_plugin_setup(self, plugin_class): + name = getattr(plugin_class, 'name', plugin_class.__name__) + self.tracer_map[name.lower()] = plugin_class + + def plugin(self, name): + return self.tracer_map.get(name, None) + + def create(self, + tracer_type, + tracer_config, + span_decorator=None, + **kwargs): + if not tracer_type: + return Tracer() + plugin_class = self.plugin(tracer_type.lower()) + if not plugin_class: + raise RuntimeError('Tracer Plugin \'{}\' not installed!'.format(tracer_type)) + + tracer = plugin_class.create(tracer_config, span_decorator=span_decorator, **kwargs) + return tracer diff --git a/shards/tracer/plugins/jaeger_factory.py b/shards/tracer/plugins/jaeger_factory.py new file mode 100644 index 0000000000..ec71fe427f --- /dev/null +++ b/shards/tracer/plugins/jaeger_factory.py @@ -0,0 +1,33 @@ +import logging +from jaeger_client import Config +from grpc_opentracing.grpcext import intercept_server +from grpc_opentracing import open_tracing_server_interceptor +from tracer import Tracer + +logger = logging.getLogger(__name__) + +PLUGIN_NAME = __name__ + +class JaegerFactory: + name = 'jaeger' + @classmethod + def create(cls, tracer_config, span_decorator=None, **kwargs): + tracing_config = tracer_config.TRACING_CONFIG + service_name = tracer_config.TRACING_SERVICE_NAME + validate = tracer_config.TRACING_VALIDATE + config = Config(config=tracing_config, + service_name=service_name, + validate=validate) + + tracer = config.initialize_tracer() + tracer_interceptor = open_tracing_server_interceptor( + tracer, + log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + span_decorator=span_decorator) + + return Tracer(tracer, tracer_interceptor, intercept_server) + + +def setup(app): + logger.debug('Plugin \'{}\' Installed In Package: {}'.format(PLUGIN_NAME, app.plugin_package_name)) + app.on_plugin_setup(JaegerFactory) diff --git a/shards/tracing/factory.py b/shards/tracing/factory.py deleted file mode 100644 index 14fcde2eb3..0000000000 --- a/shards/tracing/factory.py +++ /dev/null @@ -1,40 +0,0 @@ -import logging -from jaeger_client import Config -from grpc_opentracing.grpcext import intercept_server -from grpc_opentracing import open_tracing_server_interceptor - -from tracing import (Tracer, empty_server_interceptor_decorator) - -logger = logging.getLogger(__name__) - - -class TracerFactory: - @classmethod - def new_tracer(cls, - tracer_type, - tracer_config, - span_decorator=None, - **kwargs): - if not tracer_type: - return Tracer() - config = tracer_config.TRACING_CONFIG - service_name = tracer_config.TRACING_SERVICE_NAME - validate = tracer_config.TRACING_VALIDATE - # if not tracer_type: - # tracer_type = 'jaeger' - # config = tracer_config.DEFAULT_TRACING_CONFIG - - if tracer_type.lower() == 'jaeger': - config = Config(config=config, - service_name=service_name, - validate=validate) - - tracer = config.initialize_tracer() - tracer_interceptor = open_tracing_server_interceptor( - tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, - span_decorator=span_decorator) - - return Tracer(tracer, tracer_interceptor, intercept_server) - - assert False, 'Unsupported tracer type: {}'.format(tracer_type) From cf6df18446035bfaa46bd60434fb861ee5893e4f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 25 Oct 2019 20:24:03 +0800 Subject: [PATCH 098/196] implement plugin framework for tracer implement plugin framework for tracer --- shards/tracer/plugins/jaeger_factory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shards/tracer/plugins/jaeger_factory.py 
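A hedged sketch of the Jaeger initialization the plugin above performs; the sampler values mirror the 'const'/1 defaults from the tutorial, and the service name is illustrative.

```
from jaeger_client import Config

# Illustrative initialization, assuming a local Jaeger agent; the repo builds
# the config dict from its TracingConfig instead of hard-coding it.
config = Config(config={'sampler': {'type': 'const', 'param': 1},
                        'logging': False},
                service_name='mishards',
                validate=True)
tracer = config.initialize_tracer()  # opentracing-compatible tracer, or None
                                     # if a tracer was already initialized
```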
b/shards/tracer/plugins/jaeger_factory.py index ec71fe427f..384dbecaba 100644 --- a/shards/tracer/plugins/jaeger_factory.py +++ b/shards/tracer/plugins/jaeger_factory.py @@ -6,7 +6,7 @@ from tracer import Tracer logger = logging.getLogger(__name__) -PLUGIN_NAME = __name__ +PLUGIN_NAME = __file__ class JaegerFactory: name = 'jaeger' @@ -29,5 +29,5 @@ class JaegerFactory: def setup(app): - logger.debug('Plugin \'{}\' Installed In Package: {}'.format(PLUGIN_NAME, app.plugin_package_name)) + logger.info('Plugin \'{}\' Installed In Package: {}'.format(PLUGIN_NAME, app.plugin_package_name)) app.on_plugin_setup(JaegerFactory) From 2ab0e0eb93e655b2c7f71c73c1acb111f3b6a398 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Fri, 25 Oct 2019 20:39:44 +0800 Subject: [PATCH 099/196] fix unit test and code style changes --- shards/conftest.py | 1 + shards/mishards/__init__.py | 4 ++-- shards/tracer/factory.py | 16 +++++++++------- shards/tracer/plugins/jaeger_factory.py | 1 + 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/shards/conftest.py b/shards/conftest.py index aa4d409979..4cdcbdbe0c 100644 --- a/shards/conftest.py +++ b/shards/conftest.py @@ -25,6 +25,7 @@ def app(request): yield app db.drop_all() + app.stop() # shutil.rmtree(tpath) diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py index c5ecbe93fc..72431c9b57 100644 --- a/shards/mishards/__init__.py +++ b/shards/mishards/__init__.py @@ -25,8 +25,8 @@ def create_app(testing_config=None): from mishards.grpc_utils import GrpcSpanDecorator from tracer.factory import TracerFactory tracer = TracerFactory(config.TRACING_PLUGIN_PATH).create(config.TRACING_TYPE, - settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) + settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) from mishards.routings import RouterFactory router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr) diff --git a/shards/tracer/factory.py b/shards/tracer/factory.py index 7ffed32bd0..662ae29244 100644 --- a/shards/tracer/factory.py +++ b/shards/tracer/factory.py @@ -2,6 +2,7 @@ import os import logging from functools import partial from pluginbase import PluginBase +from tracer import Tracer logger = logging.getLogger(__name__) @@ -11,7 +12,8 @@ get_path = partial(os.path.join, here) PLUGIN_PACKAGE_NAME = 'tracer.plugins' plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, - searchpath=[get_path('./plugins')]) + searchpath=[get_path('./plugins')]) + class TracerFactory(object): def __init__(self, searchpath=None): @@ -19,8 +21,8 @@ class TracerFactory(object): self.tracer_map = {} searchpath = searchpath if searchpath else [] searchpath = [searchpath] if isinstance(searchpath, str) else searchpath - self.source = plugin_base.make_plugin_source( - searchpath=searchpath, identifier=self.__class__.__name__) + self.source = plugin_base.make_plugin_source(searchpath=searchpath, + identifier=self.__class__.__name__) for plugin_name in self.source.list_plugins(): plugin = self.source.load_plugin(plugin_name) @@ -34,10 +36,10 @@ class TracerFactory(object): return self.tracer_map.get(name, None) def create(self, - tracer_type, - tracer_config, - span_decorator=None, - **kwargs): + tracer_type, + tracer_config, + span_decorator=None, + **kwargs): if not tracer_type: return Tracer() plugin_class = self.plugin(tracer_type.lower()) diff --git a/shards/tracer/plugins/jaeger_factory.py b/shards/tracer/plugins/jaeger_factory.py index 384dbecaba..7b18a86130 100644 --- a/shards/tracer/plugins/jaeger_factory.py +++ 
b/shards/tracer/plugins/jaeger_factory.py
@@ -8,6 +8,7 @@ logger = logging.getLogger(__name__)
 
 PLUGIN_NAME = __file__
 
+
 class JaegerFactory:
     name = 'jaeger'
     @classmethod
From 63997d55ec142c73f1a84e6996861c316880567d Mon Sep 17 00:00:00 2001
From: "peng.xu" 
Date: Sat, 26 Oct 2019 12:49:50 +0800
Subject: [PATCH 100/196] implement router plugins
---
 shards/mishards/__init__.py                   |  5 +-
 shards/mishards/router/__init__.py            | 22 ++++++
 shards/mishards/router/factory.py             | 49 +++++++++++++
 .../plugins/file_based_hash_ring_router.py}   | 68 +++++--------------
 shards/mishards/settings.py                   |  1 +
 shards/utils/pluginextension.py               | 15 ++++
 6 files changed, 108 insertions(+), 52 deletions(-)
 create mode 100644 shards/mishards/router/__init__.py
 create mode 100644 shards/mishards/router/factory.py
 rename shards/mishards/{routings.py => router/plugins/file_based_hash_ring_router.py} (53%)
 create mode 100644 shards/utils/pluginextension.py

diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py
index 72431c9b57..0c5ecd4d0e 100644
--- a/shards/mishards/__init__.py
+++ b/shards/mishards/__init__.py
@@ -28,8 +28,9 @@ def create_app(testing_config=None):
                                                settings.TracingConfig,
                                                span_decorator=GrpcSpanDecorator())
 
-    from mishards.routings import RouterFactory
-    router = RouterFactory.new_router(config.ROUTER_CLASS_NAME, connect_mgr)
+    from mishards.router.factory import RouterFactory
+    router = RouterFactory(config.ROUTER_PLUGIN_PATH).create(config.ROUTER_CLASS_NAME,
+                                                             conn_mgr=connect_mgr)
 
     grpc_server.init_app(conn_mgr=connect_mgr,
                          tracer=tracer,
diff --git a/shards/mishards/router/__init__.py b/shards/mishards/router/__init__.py
new file mode 100644
index 0000000000..4150f3b736
--- /dev/null
+++ b/shards/mishards/router/__init__.py
@@ -0,0 +1,22 @@
+from mishards import exceptions
+
+
+class RouterMixin:
+    def __init__(self, conn_mgr):
+        self.conn_mgr = conn_mgr
+
+    def routing(self, table_name, metadata=None, **kwargs):
+        raise NotImplementedError()
+
+    def connection(self, metadata=None):
+        conn = self.conn_mgr.conn('WOSERVER', metadata=metadata)
+        if conn:
+            conn.on_connect(metadata=metadata)
+        return conn.conn
+
+    def query_conn(self, name, metadata=None):
+        conn = self.conn_mgr.conn(name, metadata=metadata)
+        if not conn:
+            raise exceptions.ConnectionNotFoundError(name, metadata=metadata)
+        conn.on_connect(metadata=metadata)
+        return conn.conn
diff --git a/shards/mishards/router/factory.py b/shards/mishards/router/factory.py
new file mode 100644
index 0000000000..2671cc3156
--- /dev/null
+++ b/shards/mishards/router/factory.py
@@ -0,0 +1,49 @@
+import os
+import logging
+from functools import partial
+# from pluginbase import PluginBase
+# import importlib
+from utils.pluginextension import MiPluginBase
+
+logger = logging.getLogger(__name__)
+
+here = os.path.abspath(os.path.dirname(__file__))
+get_path = partial(os.path.join, here)
+
+PLUGIN_PACKAGE_NAME = 'router.plugins'
+plugin_base = MiPluginBase(package=PLUGIN_PACKAGE_NAME,
+                           searchpath=[get_path('./plugins')])
+
+
+class RouterFactory(object):
+    PLUGIN_TYPE = 'Router'
+
+    def __init__(self, searchpath=None):
+        self.plugin_package_name = PLUGIN_PACKAGE_NAME
+        self.class_map = {}
+        searchpath = searchpath if searchpath else []
+        searchpath = [searchpath] if isinstance(searchpath, str) else searchpath
+        self.source = plugin_base.make_plugin_source(searchpath=searchpath,
+                                                     identifier=self.__class__.__name__)
+
+        for plugin_name in self.source.list_plugins():
+            plugin = self.source.load_plugin(plugin_name)
+            plugin.setup(self)
+
+    def 
on_plugin_setup(self, plugin_class): + name = getattr(plugin_class, 'name', plugin_class.__name__) + self.class_map[name.lower()] = plugin_class + + def plugin(self, name): + return self.class_map.get(name, None) + + def create(self, class_name, class_config=None, **kwargs): + if not class_name: + raise RuntimeError('Please specify router class_name first!') + + this_class = self.plugin(class_name.lower()) + if not this_class: + raise RuntimeError('{} Plugin \'{}\' Not Installed!'.format(self.PLUGIN_TYPE, class_name)) + + router = this_class.create(class_config, **kwargs) + return router diff --git a/shards/mishards/routings.py b/shards/mishards/router/plugins/file_based_hash_ring_router.py similarity index 53% rename from shards/mishards/routings.py rename to shards/mishards/router/plugins/file_based_hash_ring_router.py index 823972726f..eddb425cfe 100644 --- a/shards/mishards/routings.py +++ b/shards/mishards/router/plugins/file_based_hash_ring_router.py @@ -1,64 +1,19 @@ import logging from sqlalchemy import exc as sqlalchemy_exc from sqlalchemy import and_ - +from mishards.models import Tables +from mishards.router import RouterMixin from mishards import exceptions, db from mishards.hash_ring import HashRing -from mishards.models import Tables logger = logging.getLogger(__name__) -class RouteManager: - ROUTER_CLASSES = {} - - @classmethod - def register_router_class(cls, target): - name = target.__dict__.get('NAME', None) - name = name if name else target.__class__.__name__ - cls.ROUTER_CLASSES[name] = target - return target - - @classmethod - def get_router_class(cls, name): - return cls.ROUTER_CLASSES.get(name, None) - - -class RouterFactory: - @classmethod - def new_router(cls, name, conn_mgr, **kwargs): - router_class = RouteManager.get_router_class(name) - assert router_class - return router_class(conn_mgr, **kwargs) - - -class RouterMixin: - def __init__(self, conn_mgr): - self.conn_mgr = conn_mgr - - def routing(self, table_name, metadata=None, **kwargs): - raise NotImplemented() - - def connection(self, metadata=None): - conn = self.conn_mgr.conn('WOSERVER', metadata=metadata) - if conn: - conn.on_connect(metadata=metadata) - return conn.conn - - def query_conn(self, name, metadata=None): - conn = self.conn_mgr.conn(name, metadata=metadata) - if not conn: - raise exceptions.ConnectionNotFoundError(name, metadata=metadata) - conn.on_connect(metadata=metadata) - return conn.conn - - -@RouteManager.register_router_class -class FileBasedHashRingRouter(RouterMixin): - NAME = 'FileBasedHashRingRouter' +class Factory(RouterMixin): + name = 'FileBasedHashRingRouter' def __init__(self, conn_mgr, **kwargs): - super(FileBasedHashRingRouter, self).__init__(conn_mgr) + super(Factory, self).__init__(conn_mgr) def routing(self, table_name, metadata=None, **kwargs): range_array = kwargs.pop('range_array', None) @@ -94,3 +49,16 @@ class FileBasedHashRingRouter(RouterMixin): routing[target_host]['file_ids'].append(str(f.id)) return routing + + @classmethod + def create(cls, config, **kwargs): + conn_mgr = kwargs.pop('conn_mgr', None) + if not conn_mgr: + raise RuntimeError('Cannot find \'conn_mgr\' to initialize \'{}\''.format(self.name)) + router = cls(conn_mgr, **kwargs) + return router + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(__file__, app.plugin_package_name)) + app.on_plugin_setup(Factory) diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py index 08550374ad..c08e1d7a06 100644 --- a/shards/mishards/settings.py +++ 
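The router above asks a hash ring which read-only server should own each table file. The sketch below is an illustrative stand-in rather than the repo's HashRing API: any stable hash gives each host a deterministic share of file ids, producing the same {host: {'file_ids': [...]}} shape the routing() method builds.

```
import hashlib

# Illustrative stand-in for the hash-ring lookup (not the repo's HashRing API).
def pick_host(file_id, hosts):
    digest = int(hashlib.md5(str(file_id).encode()).hexdigest(), 16)
    return hosts[digest % len(hosts)]

hosts = ['ro-servers-0', 'ro-servers-1']   # invented connection names
routing = {}
for fid in range(10):
    target = pick_host(fid, hosts)
    routing.setdefault(target, {'file_ids': []})['file_ids'].append(str(fid))
print(routing)  # each host gets a stable share of the file ids
```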
b/shards/mishards/settings.py @@ -76,6 +76,7 @@ class DefaultConfig: SQL_ECHO = env.bool('SQL_ECHO', False) TRACING_PLUGIN_PATH = env.str('TRACING_PLUGIN_PATH', '') TRACING_TYPE = env.str('TRACING_TYPE', '') + ROUTER_PLUGIN_PATH = env.str('ROUTER_PLUGIN_PATH', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') diff --git a/shards/utils/pluginextension.py b/shards/utils/pluginextension.py new file mode 100644 index 0000000000..e5339b4fdc --- /dev/null +++ b/shards/utils/pluginextension.py @@ -0,0 +1,15 @@ +import importlib +from pluginbase import PluginBase, PluginSource + + +class MiPluginSource(PluginSource): + def load_plugin(self, name): + if '.' in name: + raise ImportError('Plugin names cannot contain dots.') + with self: + return importlib.import_module(self.base.package + '.' + name) + + +class MiPluginBase(PluginBase): + def make_plugin_source(self, *args, **kwargs): + return MiPluginSource(self, *args, **kwargs) From 4c774a77f5eab004764d46d200bea36360aa8499 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 26 Oct 2019 14:03:51 +0800 Subject: [PATCH 101/196] fix PluginBase import bug --- shards/mishards/router/factory.py | 8 +++----- shards/tracer/factory.py | 3 ++- shards/utils/pluginextension.py | 11 ++++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/shards/mishards/router/factory.py b/shards/mishards/router/factory.py index 2671cc3156..66d549f2a6 100644 --- a/shards/mishards/router/factory.py +++ b/shards/mishards/router/factory.py @@ -1,9 +1,7 @@ import os import logging from functools import partial -# from pluginbase import PluginBase -# import importlib -from utils.pluginextension import MiPluginBase +from utils.pluginextension import MiPluginBase as PluginBase logger = logging.getLogger(__name__) @@ -11,8 +9,8 @@ here = os.path.abspath(os.path.dirname(__file__)) get_path = partial(os.path.join, here) PLUGIN_PACKAGE_NAME = 'router.plugins' -plugin_base = MiPluginBase(package=PLUGIN_PACKAGE_NAME, - searchpath=[get_path('./plugins')]) +plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, + searchpath=[get_path('./plugins')]) class RouterFactory(object): diff --git a/shards/tracer/factory.py b/shards/tracer/factory.py index 662ae29244..9342ca3d7e 100644 --- a/shards/tracer/factory.py +++ b/shards/tracer/factory.py @@ -1,7 +1,8 @@ import os import logging from functools import partial -from pluginbase import PluginBase +from utils.pluginextension import MiPluginBase as PluginBase +# from pluginbase import PluginBase from tracer import Tracer diff --git a/shards/utils/pluginextension.py b/shards/utils/pluginextension.py index e5339b4fdc..68413a4e55 100644 --- a/shards/utils/pluginextension.py +++ b/shards/utils/pluginextension.py @@ -1,13 +1,14 @@ -import importlib +import importlib.util from pluginbase import PluginBase, PluginSource class MiPluginSource(PluginSource): def load_plugin(self, name): - if '.' in name: - raise ImportError('Plugin names cannot contain dots.') - with self: - return importlib.import_module(self.base.package + '.' + name) + plugin = super().load_plugin(name) + spec = importlib.util.spec_from_file_location(self.base.package + '.' 
+ name, plugin.__file__)
+        plugin = importlib.util.module_from_spec(spec)
+        spec.loader.exec_module(plugin)
+        return plugin
 
 
 class MiPluginBase(PluginBase):
     def make_plugin_source(self, *args, **kwargs):
         return MiPluginSource(self, *args, **kwargs)
From 83818546db7dbc40b5a8d551a169c6a59bb88b9f Mon Sep 17 00:00:00 2001
From: "peng.xu" 
Date: Sat, 26 Oct 2019 14:14:27 +0800
Subject: [PATCH 102/196] fix bug for router
---
 shards/mishards/router/factory.py | 2 +-
 shards/mishards/test_server.py    | 2 +-
 shards/tracer/factory.py          | 1 -
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/shards/mishards/router/factory.py b/shards/mishards/router/factory.py
index 66d549f2a6..845f3ceabc 100644
--- a/shards/mishards/router/factory.py
+++ b/shards/mishards/router/factory.py
@@ -8,7 +8,7 @@ logger = logging.getLogger(__name__)
 here = os.path.abspath(os.path.dirname(__file__))
 get_path = partial(os.path.join, here)
 
-PLUGIN_PACKAGE_NAME = 'router.plugins'
+PLUGIN_PACKAGE_NAME = 'mishards.router.plugins'
 plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME,
                          searchpath=[get_path('./plugins')])
 
diff --git a/shards/mishards/test_server.py b/shards/mishards/test_server.py
index efd3912076..f0cde2184c 100644
--- a/shards/mishards/test_server.py
+++ b/shards/mishards/test_server.py
@@ -13,7 +13,7 @@ from mishards import db, create_app, settings
 from mishards.service_handler import ServiceHandler
 from mishards.grpc_utils.grpc_args_parser import GrpcArgsParser as Parser
 from mishards.factories import TableFilesFactory, TablesFactory, TableFiles, Tables
-from mishards.routings import RouterMixin
+from mishards.router import RouterMixin
 
 logger = logging.getLogger(__name__)
 
diff --git a/shards/tracer/factory.py b/shards/tracer/factory.py
index 9342ca3d7e..fff7a885e4 100644
--- a/shards/tracer/factory.py
+++ b/shards/tracer/factory.py
@@ -2,7 +2,6 @@ import os
 import logging
 from functools import partial
 from utils.pluginextension import MiPluginBase as PluginBase
-# from pluginbase import PluginBase
 from tracer import Tracer
 
 
From 1d39ec75b09d62604ad827664d4a0412cc4bbe8b Mon Sep 17 00:00:00 2001
From: "peng.xu" 
Date: Sat, 26 Oct 2019 16:27:16 +0800
Subject: [PATCH 103/196] implement service discovery plugins
---
 shards/discovery/__init__.py                | 36 +++++++++++++
 shards/discovery/factory.py                 | 53 ++++++++++++++++++
 .../plugins}/kubernetes_provider.py         | 55 ++++++++++-------
 shards/discovery/plugins/static_provider.py | 43 +++++++++++++++
 shards/mishards/__init__.py                 |  8 +--
 shards/mishards/settings.py                 | 21 ++-----
 shards/sd/__init__.py                       | 28 ----------
 shards/sd/static_provider.py                | 39 -------------
 shards/utils/__init__.py                    |  7 +++
 9 files changed, 182 insertions(+), 108 deletions(-)
 create mode 100644 shards/discovery/__init__.py
 create mode 100644 shards/discovery/factory.py
 rename shards/{sd => discovery/plugins}/kubernetes_provider.py (84%)
 create mode 100644 shards/discovery/plugins/static_provider.py
 delete mode 100644 shards/sd/__init__.py
 delete mode 100644 shards/sd/static_provider.py

diff --git a/shards/discovery/__init__.py b/shards/discovery/__init__.py
new file mode 100644
index 0000000000..a591d1cc1c
--- /dev/null
+++ b/shards/discovery/__init__.py
@@ -0,0 +1,36 @@
+import os
+import sys
+if __name__ == '__main__':
+    sys.path.append(os.path.dirname(os.path.dirname(
+        os.path.abspath(__file__))))
+
+import logging
+from utils import dotdict
+
+logger = logging.getLogger(__name__)
+
+
+class DiscoveryConfig(dotdict):
+    CONFIG_PREFIX = 'DISCOVERY_'
+
+    def dump(self):
+        logger.info('----------- DiscoveryConfig -----------------')
+        for k, v in self.items():
+            logger.info('{}: 
{}'.format(k, v)) + if len(self) <= 0: + logger.error(' Empty DiscoveryConfig Found! ') + logger.info('---------------------------------------------') + + @classmethod + def Create(cls, **kwargs): + o = cls() + + for k, v in os.environ.items(): + if not k.startswith(cls.CONFIG_PREFIX): + continue + o[k] = v + for k, v in kwargs.items(): + o[k] = v + + o.dump() + return o diff --git a/shards/discovery/factory.py b/shards/discovery/factory.py new file mode 100644 index 0000000000..a5713dcf37 --- /dev/null +++ b/shards/discovery/factory.py @@ -0,0 +1,53 @@ +import os +import logging +from functools import partial +from utils.pluginextension import MiPluginBase as PluginBase +from discovery import DiscoveryConfig + +logger = logging.getLogger(__name__) + +here = os.path.abspath(os.path.dirname(__file__)) +get_path = partial(os.path.join, here) + +PLUGIN_PACKAGE_NAME = 'discovery.plugins' +plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, + searchpath=[get_path('./plugins')]) + + +class DiscoveryFactory(object): + PLUGIN_TYPE = 'Discovery' + + def __init__(self, searchpath=None): + self.plugin_package_name = PLUGIN_PACKAGE_NAME + self.class_map = {} + searchpath = searchpath if searchpath else [] + searchpath = [searchpath] if isinstance(searchpath, str) else searchpath + self.source = plugin_base.make_plugin_source(searchpath=searchpath, + identifier=self.__class__.__name__) + + for plugin_name in self.source.list_plugins(): + plugin = self.source.load_plugin(plugin_name) + plugin.setup(self) + + def on_plugin_setup(self, plugin_class): + name = getattr(plugin_class, 'name', plugin_class.__name__) + self.class_map[name.lower()] = plugin_class + + def plugin(self, name): + return self.class_map.get(name, None) + + def create(self, class_name, **kwargs): + conn_mgr = kwargs.pop('conn_mgr', None) + if not conn_mgr: + raise RuntimeError('Please pass conn_mgr to create discovery!') + + if not class_name: + raise RuntimeError('Please specify \'{}\' class_name first!'.format(self.PLUGIN_TYPE)) + + plugin_class = self.plugin(class_name.lower()) + if not plugin_class: + raise RuntimeError('{} Plugin \'{}\' Not Installed!'.format(self.PLUGIN_TYPE, class_name)) + + plugin_config = DiscoveryConfig.Create() + plugin = plugin_class.create(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) + return plugin diff --git a/shards/sd/kubernetes_provider.py b/shards/discovery/plugins/kubernetes_provider.py similarity index 84% rename from shards/sd/kubernetes_provider.py rename to shards/discovery/plugins/kubernetes_provider.py index eb113db007..c9d9a3ad5a 100644 --- a/shards/sd/kubernetes_provider.py +++ b/shards/discovery/plugins/kubernetes_provider.py @@ -13,9 +13,6 @@ import queue import enum from kubernetes import client, config, watch -from utils import singleton -from sd import ProviderManager - logger = logging.getLogger(__name__) INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' @@ -42,6 +39,8 @@ class K8SMixin: class K8SHeartbeatHandler(threading.Thread, K8SMixin): + name = 'kubernetes' + def __init__(self, message_queue, namespace, @@ -235,18 +234,19 @@ class KubernetesProviderSettings: self.port = int(port) if port else 19530 -@singleton -@ProviderManager.register_service_provider class KubernetesProvider(object): - NAME = 'Kubernetes' + name = 'kubernetes' - def __init__(self, settings, conn_mgr, **kwargs): - self.namespace = settings.namespace - self.pod_patt = settings.pod_patt - self.label_selector = settings.label_selector - self.in_cluster = 
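DiscoveryConfig.Create above is essentially a prefix scan over the process environment: every DISCOVERY_* variable is copied into the config mapping, then keyword overrides are applied. A minimal sketch:

```
import os

# Sketch of the DISCOVERY_ prefix scan performed by DiscoveryConfig.Create.
os.environ['DISCOVERY_STATIC_HOSTS'] = '127.0.0.1'

conf = {k: v for k, v in os.environ.items() if k.startswith('DISCOVERY_')}
assert conf['DISCOVERY_STATIC_HOSTS'] == '127.0.0.1'
```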
settings.in_cluster - self.poll_interval = settings.poll_interval - self.port = settings.port + def __init__(self, plugin_config, conn_mgr, **kwargs): + self.namespace = plugin_config.DISCOVERY_KUBERNETES_NAMESPACE + self.pod_patt = plugin_config.DISCOVERY_KUBERNETES_POD_PATT + self.label_selector = plugin_config.DISCOVERY_KUBERNETES_LABEL_SELECTOR + self.in_cluster = plugin_config.DISCOVERY_KUBERNETES_IN_CLUSTER.lower() + self.in_cluster = self.in_cluster == 'true' + self.poll_interval = plugin_config.DISCOVERY_KUBERNETES_POLL_INTERVAL + self.poll_interval = int(self.poll_interval) if self.poll_interval else 5 + self.port = plugin_config.DISCOVERY_KUBERNETES_PORT + self.port = int(self.port) if self.port else 19530 self.kwargs = kwargs self.queue = queue.Queue() @@ -298,9 +298,23 @@ class KubernetesProvider(object): self.pod_heartbeater.stop() self.event_handler.stop() + @classmethod + def create(cls, conn_mgr, plugin_config, **kwargs): + discovery = cls(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) + return discovery + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(__file__, app.plugin_package_name)) + app.on_plugin_setup(KubernetesProvider) + if __name__ == '__main__': logging.basicConfig(level=logging.INFO) + sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))))) + sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__))))) class Connect: def register(self, name, value): @@ -315,14 +329,15 @@ if __name__ == '__main__': connect_mgr = Connect() - settings = KubernetesProviderSettings(namespace='xp', - pod_patt=".*-ro-servers-.*", - label_selector='tier=ro-servers', - poll_interval=5, - in_cluster=False) + from discovery import DiscoveryConfig + settings = DiscoveryConfig(DISCOVERY_KUBERNETES_NAMESPACE='xp', + DISCOVERY_KUBERNETES_POD_PATT=".*-ro-servers-.*", + DISCOVERY_KUBERNETES_LABEL_SELECTOR='tier=ro-servers', + DISCOVERY_KUBERNETES_POLL_INTERVAL=5, + DISCOVERY_KUBERNETES_IN_CLUSTER=False) - provider_class = ProviderManager.get_provider('Kubernetes') - t = provider_class(conn_mgr=connect_mgr, settings=settings) + provider_class = KubernetesProvider + t = provider_class(conn_mgr=connect_mgr, plugin_config=settings) t.start() cnt = 100 while cnt > 0: diff --git a/shards/discovery/plugins/static_provider.py b/shards/discovery/plugins/static_provider.py new file mode 100644 index 0000000000..0f8bdb3d25 --- /dev/null +++ b/shards/discovery/plugins/static_provider.py @@ -0,0 +1,43 @@ +import os +import sys +if __name__ == '__main__': + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import logging +import socket + +logger = logging.getLogger(__name__) + + +class StaticDiscovery(object): + name = 'static' + + def __init__(self, config, conn_mgr, **kwargs): + self.conn_mgr = conn_mgr + hosts = [config.DISCOVERY_STATIC_HOSTS] if isinstance(config.DISCOVERY_STATIC_HOSTS, str) else hosts + self.hosts = [socket.gethostbyname(host) for host in hosts] + self.port = config.DISCOVERY_STATIC_PORT + + def start(self): + for host in self.hosts: + self.add_pod(host, host) + + def stop(self): + for host in self.hosts: + self.delete_pod(host) + + def add_pod(self, name, ip): + self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) + + def delete_pod(self, name): + self.conn_mgr.unregister(name) + + @classmethod + def create(cls, conn_mgr, plugin_config, **kwargs): + discovery = cls(config=plugin_config, conn_mgr=conn_mgr, 
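Because DiscoveryConfig delivers raw environment strings, the plugins above must coerce them to typed values themselves, and a value like DISCOVERY_STATIC_HOSTS may arrive either as a comma-separated string or as a list. A defensive sketch of both steps; note that the list branch must still bind the local name, which is easy to get wrong:

```
import socket

# String-to-typed coercion, as the Kubernetes plugin does above.
raw = {'DISCOVERY_KUBERNETES_IN_CLUSTER': 'False',
       'DISCOVERY_KUBERNETES_POLL_INTERVAL': '5',
       'DISCOVERY_KUBERNETES_PORT': ''}
in_cluster = raw['DISCOVERY_KUBERNETES_IN_CLUSTER'].lower() == 'true'
poll_interval = int(raw['DISCOVERY_KUBERNETES_POLL_INTERVAL'] or 5)
port = int(raw['DISCOVERY_KUBERNETES_PORT'] or 19530)
assert (in_cluster, poll_interval, port) == (False, 5, 19530)

# Host normalization before resolution, as StaticDiscovery needs above.
def normalize(value):
    hosts = value.split(',') if isinstance(value, str) else list(value)
    return [socket.gethostbyname(h.strip()) for h in hosts]

print(normalize('localhost'), normalize(['localhost']))
```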
**kwargs) + return discovery + + +def setup(app): + logger.info('Plugin \'{}\' Installed In Package: {}'.format(__file__, app.plugin_package_name)) + app.on_plugin_setup(StaticDiscovery) diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py index 0c5ecd4d0e..e0792348a9 100644 --- a/shards/mishards/__init__.py +++ b/shards/mishards/__init__.py @@ -16,11 +16,9 @@ def create_app(testing_config=None): from mishards.connections import ConnectionMgr connect_mgr = ConnectionMgr() - from sd import ProviderManager - - sd_proiver_class = ProviderManager.get_provider(settings.SD_PROVIDER) - discover = sd_proiver_class(settings=settings.SD_PROVIDER_SETTINGS, - conn_mgr=connect_mgr) + from discovery.factory import DiscoveryFactory + discover = DiscoveryFactory(config.DISCOVERY_PLUGIN_PATH).create(config.DISCOVERY_CLASS_NAME, + conn_mgr=connect_mgr) from mishards.grpc_utils import GrpcSpanDecorator from tracer.factory import TracerFactory diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py index c08e1d7a06..6935405091 100644 --- a/shards/mishards/settings.py +++ b/shards/mishards/settings.py @@ -11,6 +11,7 @@ if FROM_EXAMPLE: else: env.read_env() + DEBUG = env.bool('DEBUG', False) LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') @@ -28,22 +29,8 @@ SERVER_PORT = env.int('SERVER_PORT', 19530) SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) WOSERVER = env.str('WOSERVER') -SD_PROVIDER_SETTINGS = None -SD_PROVIDER = env.str('SD_PROVIDER', 'Kubernetes') -if SD_PROVIDER == 'Kubernetes': - from sd.kubernetes_provider import KubernetesProviderSettings - SD_PROVIDER_SETTINGS = KubernetesProviderSettings( - namespace=env.str('SD_NAMESPACE', ''), - in_cluster=env.bool('SD_IN_CLUSTER', False), - poll_interval=env.int('SD_POLL_INTERVAL', 5), - pod_patt=env.str('SD_ROSERVER_POD_PATT', ''), - label_selector=env.str('SD_LABEL_SELECTOR', ''), - port=env.int('SD_PORT', 19530)) -elif SD_PROVIDER == 'Static': - from sd.static_provider import StaticProviderSettings - SD_PROVIDER_SETTINGS = StaticProviderSettings( - hosts=env.list('SD_STATIC_HOSTS', []), - port=env.int('SD_STATIC_PORT', 19530)) +DISCOVERY_STATIC_HOSTS = env.list('DISCOVERY_STATIC_HOSTS', []) +DISCOVERY_STATIC_PORT = env.int('DISCOVERY_STATIC_PORT', 19530) # TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530') @@ -78,6 +65,8 @@ class DefaultConfig: TRACING_TYPE = env.str('TRACING_TYPE', '') ROUTER_PLUGIN_PATH = env.str('ROUTER_PLUGIN_PATH', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') + DISCOVERY_PLUGIN_PATH = env.str('DISCOVERY_PLUGIN_PATH', '') + DISCOVERY_CLASS_NAME = env.str('DISCOVERY_CLASS_NAME', 'static') class TestingConfig(DefaultConfig): diff --git a/shards/sd/__init__.py b/shards/sd/__init__.py deleted file mode 100644 index 7943887d0f..0000000000 --- a/shards/sd/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging -import inspect -# from utils import singleton - -logger = logging.getLogger(__name__) - - -class ProviderManager: - PROVIDERS = {} - - @classmethod - def register_service_provider(cls, target): - if inspect.isfunction(target): - cls.PROVIDERS[target.__name__] = target - elif inspect.isclass(target): - name = target.__dict__.get('NAME', None) - name = name if name else target.__class__.__name__ - cls.PROVIDERS[name] = target - else: - assert False, 'Cannot register_service_provider for: {}'.format(target) - return target - - @classmethod - def get_provider(cls, name): - return cls.PROVIDERS.get(name, None) - - -from sd 
import kubernetes_provider, static_provider diff --git a/shards/sd/static_provider.py b/shards/sd/static_provider.py deleted file mode 100644 index e88780740f..0000000000 --- a/shards/sd/static_provider.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import sys -if __name__ == '__main__': - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -import socket -from utils import singleton -from sd import ProviderManager - - -class StaticProviderSettings: - def __init__(self, hosts, port=None): - self.hosts = hosts - self.port = int(port) if port else 19530 - - -@singleton -@ProviderManager.register_service_provider -class KubernetesProvider(object): - NAME = 'Static' - - def __init__(self, settings, conn_mgr, **kwargs): - self.conn_mgr = conn_mgr - self.hosts = [socket.gethostbyname(host) for host in settings.hosts] - self.port = settings.port - - def start(self): - for host in self.hosts: - self.add_pod(host, host) - - def stop(self): - for host in self.hosts: - self.delete_pod(host) - - def add_pod(self, name, ip): - self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port)) - - def delete_pod(self, name): - self.conn_mgr.unregister(name) diff --git a/shards/utils/__init__.py b/shards/utils/__init__.py index c1d55e76c0..cf444c0680 100644 --- a/shards/utils/__init__.py +++ b/shards/utils/__init__.py @@ -9,3 +9,10 @@ def singleton(cls): instances[cls] = cls(*args, **kw) return instances[cls] return getinstance + + +class dotdict(dict): + """dot.notation access to dictionary attributes""" + __getattr__ = dict.get + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ From cd0a112f5583bf2351cc4355dbf7be5ffbce615f Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 26 Oct 2019 16:54:31 +0800 Subject: [PATCH 104/196] add plugins base mixin --- shards/discovery/factory.py | 40 ++++---------------------------- shards/utils/plugins/__init__.py | 39 +++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 36 deletions(-) create mode 100644 shards/utils/plugins/__init__.py diff --git a/shards/discovery/factory.py b/shards/discovery/factory.py index a5713dcf37..80334daf68 100644 --- a/shards/discovery/factory.py +++ b/shards/discovery/factory.py @@ -1,53 +1,21 @@ -import os import logging -from functools import partial -from utils.pluginextension import MiPluginBase as PluginBase from discovery import DiscoveryConfig +from utils.plugins import BaseMixin logger = logging.getLogger(__name__) - -here = os.path.abspath(os.path.dirname(__file__)) -get_path = partial(os.path.join, here) - PLUGIN_PACKAGE_NAME = 'discovery.plugins' -plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, - searchpath=[get_path('./plugins')]) -class DiscoveryFactory(object): +class DiscoveryFactory(BaseMixin): PLUGIN_TYPE = 'Discovery' - def __init__(self, searchpath=None): - self.plugin_package_name = PLUGIN_PACKAGE_NAME - self.class_map = {} - searchpath = searchpath if searchpath else [] - searchpath = [searchpath] if isinstance(searchpath, str) else searchpath - self.source = plugin_base.make_plugin_source(searchpath=searchpath, - identifier=self.__class__.__name__) + super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) - for plugin_name in self.source.list_plugins(): - plugin = self.source.load_plugin(plugin_name) - plugin.setup(self) - - def on_plugin_setup(self, plugin_class): - name = getattr(plugin_class, 'name', plugin_class.__name__) - self.class_map[name.lower()] = plugin_class - - def plugin(self, name): - return self.class_map.get(name, None) - 
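A usage sketch for the dotdict helper added above: attribute-style access over a plain dict, where a missing key yields None instead of AttributeError because __getattr__ is bound to dict.get.

```
# Same three-line helper as in shards/utils/__init__.py above.
class dotdict(dict):
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

d = dotdict(DISCOVERY_STATIC_PORT=19530)
assert d.DISCOVERY_STATIC_PORT == 19530
assert d.MISSING is None  # silent None, so callers must check for it
```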
- def create(self, class_name, **kwargs): + def _create(self, plugin_class, **kwargs): conn_mgr = kwargs.pop('conn_mgr', None) if not conn_mgr: raise RuntimeError('Please pass conn_mgr to create discovery!') - if not class_name: - raise RuntimeError('Please specify \'{}\' class_name first!'.format(self.PLUGIN_TYPE)) - - plugin_class = self.plugin(class_name.lower()) - if not plugin_class: - raise RuntimeError('{} Plugin \'{}\' Not Installed!'.format(self.PLUGIN_TYPE, class_name)) - plugin_config = DiscoveryConfig.Create() plugin = plugin_class.create(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) return plugin diff --git a/shards/utils/plugins/__init__.py b/shards/utils/plugins/__init__.py new file mode 100644 index 0000000000..361dda66f9 --- /dev/null +++ b/shards/utils/plugins/__init__.py @@ -0,0 +1,39 @@ +import os +import inspect +from functools import partial +from utils.pluginextension import MiPluginBase as PluginBase + + +class BaseMixin(object): + def __init__(self, package_name, searchpath=None): + self.plugin_package_name = package_name + caller_path = os.path.dirname(inspect.stack()[1][1]) + get_path = partial(os.path.join, caller_path) + plugin_base = PluginBase(package=self.plugin_package_name, + searchpath=[get_path('./plugins')]) + self.class_map = {} + searchpath = searchpath if searchpath else [] + searchpath = [searchpath] if isinstance(searchpath, str) else searchpath + self.source = plugin_base.make_plugin_source(searchpath=searchpath, + identifier=self.__class__.__name__) + + for plugin_name in self.source.list_plugins(): + plugin = self.source.load_plugin(plugin_name) + plugin.setup(self) + + def on_plugin_setup(self, plugin_class): + name = getattr(plugin_class, 'name', plugin_class.__name__) + self.class_map[name.lower()] = plugin_class + + def plugin(self, name): + return self.class_map.get(name, None) + + def create(self, class_name, **kwargs): + if not class_name: + raise RuntimeError('Please specify \'{}\' class_name first!'.format(self.PLUGIN_TYPE)) + + plugin_class = self.plugin(class_name.lower()) + if not plugin_class: + raise RuntimeError('{} Plugin \'{}\' Not Installed!'.format(self.PLUGIN_TYPE, class_name)) + + return self._create(plugin_class, **kwargs) From 4ef3e416fa67903d807640556c473f363a0062d9 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 26 Oct 2019 17:02:14 +0800 Subject: [PATCH 105/196] refactor router plugins --- shards/mishards/router/factory.py | 40 +++---------------- .../plugins/file_based_hash_ring_router.py | 2 +- 2 files changed, 6 insertions(+), 36 deletions(-) diff --git a/shards/mishards/router/factory.py b/shards/mishards/router/factory.py index 845f3ceabc..ea29a26a1d 100644 --- a/shards/mishards/router/factory.py +++ b/shards/mishards/router/factory.py @@ -1,47 +1,17 @@ import os import logging -from functools import partial -from utils.pluginextension import MiPluginBase as PluginBase +from utils.plugins import BaseMixin logger = logging.getLogger(__name__) - -here = os.path.abspath(os.path.dirname(__file__)) -get_path = partial(os.path.join, here) - PLUGIN_PACKAGE_NAME = 'mishards.router.plugins' -plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, - searchpath=[get_path('./plugins')]) -class RouterFactory(object): +class RouterFactory(BaseMixin): PLUGIN_TYPE = 'Router' def __init__(self, searchpath=None): - self.plugin_package_name = PLUGIN_PACKAGE_NAME - self.class_map = {} - searchpath = searchpath if searchpath else [] - searchpath = [searchpath] if isinstance(searchpath, str) else searchpath - self.source = 
plugin_base.make_plugin_source(searchpath=searchpath, - identifier=self.__class__.__name__) + super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) - for plugin_name in self.source.list_plugins(): - plugin = self.source.load_plugin(plugin_name) - plugin.setup(self) - - def on_plugin_setup(self, plugin_class): - name = getattr(plugin_class, 'name', plugin_class.__name__) - self.class_map[name.lower()] = plugin_class - - def plugin(self, name): - return self.class_map.get(name, None) - - def create(self, class_name, class_config=None, **kwargs): - if not class_name: - raise RuntimeError('Please specify router class_name first!') - - this_class = self.plugin(class_name.lower()) - if not this_class: - raise RuntimeError('{} Plugin \'{}\' Not Installed!'.format(self.PLUGIN_TYPE, class_name)) - - router = this_class.create(class_config, **kwargs) + def _create(self, plugin_class, **kwargs): + router = plugin_class.create(**kwargs) return router diff --git a/shards/mishards/router/plugins/file_based_hash_ring_router.py b/shards/mishards/router/plugins/file_based_hash_ring_router.py index eddb425cfe..4697189f35 100644 --- a/shards/mishards/router/plugins/file_based_hash_ring_router.py +++ b/shards/mishards/router/plugins/file_based_hash_ring_router.py @@ -51,7 +51,7 @@ class Factory(RouterMixin): return routing @classmethod - def create(cls, config, **kwargs): + def create(cls, **kwargs): conn_mgr = kwargs.pop('conn_mgr', None) if not conn_mgr: raise RuntimeError('Cannot find \'conn_mgr\' to initialize \'{}\''.format(self.name)) From ccc80808daf9b8ece9dd860a9ac9e5d1305a662e Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 26 Oct 2019 17:19:57 +0800 Subject: [PATCH 106/196] refactor all plugins --- shards/discovery/factory.py | 3 +- .../discovery/plugins/kubernetes_provider.py | 2 +- shards/discovery/plugins/static_provider.py | 2 +- shards/mishards/__init__.py | 2 +- shards/mishards/router/factory.py | 2 +- .../plugins/file_based_hash_ring_router.py | 2 +- shards/tracer/factory.py | 53 ++++++------------- shards/tracer/plugins/jaeger_factory.py | 11 ++-- shards/utils/plugins/__init__.py | 3 +- 9 files changed, 30 insertions(+), 50 deletions(-) diff --git a/shards/discovery/factory.py b/shards/discovery/factory.py index 80334daf68..5f5c7fcf95 100644 --- a/shards/discovery/factory.py +++ b/shards/discovery/factory.py @@ -8,6 +8,7 @@ PLUGIN_PACKAGE_NAME = 'discovery.plugins' class DiscoveryFactory(BaseMixin): PLUGIN_TYPE = 'Discovery' + def __init__(self, searchpath=None): super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) @@ -17,5 +18,5 @@ class DiscoveryFactory(BaseMixin): raise RuntimeError('Please pass conn_mgr to create discovery!') plugin_config = DiscoveryConfig.Create() - plugin = plugin_class.create(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) + plugin = plugin_class.Create(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) return plugin diff --git a/shards/discovery/plugins/kubernetes_provider.py b/shards/discovery/plugins/kubernetes_provider.py index c9d9a3ad5a..aaf6091f83 100644 --- a/shards/discovery/plugins/kubernetes_provider.py +++ b/shards/discovery/plugins/kubernetes_provider.py @@ -299,7 +299,7 @@ class KubernetesProvider(object): self.event_handler.stop() @classmethod - def create(cls, conn_mgr, plugin_config, **kwargs): + def Create(cls, conn_mgr, plugin_config, **kwargs): discovery = cls(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs) return discovery diff --git 
a/shards/discovery/plugins/static_provider.py b/shards/discovery/plugins/static_provider.py index 0f8bdb3d25..9bea62f2da 100644 --- a/shards/discovery/plugins/static_provider.py +++ b/shards/discovery/plugins/static_provider.py @@ -33,7 +33,7 @@ class StaticDiscovery(object): self.conn_mgr.unregister(name) @classmethod - def create(cls, conn_mgr, plugin_config, **kwargs): + def Create(cls, conn_mgr, plugin_config, **kwargs): discovery = cls(config=plugin_config, conn_mgr=conn_mgr, **kwargs) return discovery diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py index e0792348a9..96463caa93 100644 --- a/shards/mishards/__init__.py +++ b/shards/mishards/__init__.py @@ -23,7 +23,7 @@ def create_app(testing_config=None): from mishards.grpc_utils import GrpcSpanDecorator from tracer.factory import TracerFactory tracer = TracerFactory(config.TRACING_PLUGIN_PATH).create(config.TRACING_TYPE, - settings.TracingConfig, + plugin_config=settings.TracingConfig, span_decorator=GrpcSpanDecorator()) from mishards.router.factory import RouterFactory diff --git a/shards/mishards/router/factory.py b/shards/mishards/router/factory.py index ea29a26a1d..a8f85c0df8 100644 --- a/shards/mishards/router/factory.py +++ b/shards/mishards/router/factory.py @@ -13,5 +13,5 @@ class RouterFactory(BaseMixin): super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) def _create(self, plugin_class, **kwargs): - router = plugin_class.create(**kwargs) + router = plugin_class.Create(**kwargs) return router diff --git a/shards/mishards/router/plugins/file_based_hash_ring_router.py b/shards/mishards/router/plugins/file_based_hash_ring_router.py index 4697189f35..b90935129e 100644 --- a/shards/mishards/router/plugins/file_based_hash_ring_router.py +++ b/shards/mishards/router/plugins/file_based_hash_ring_router.py @@ -51,7 +51,7 @@ class Factory(RouterMixin): return routing @classmethod - def create(cls, **kwargs): + def Create(cls, **kwargs): conn_mgr = kwargs.pop('conn_mgr', None) if not conn_mgr: raise RuntimeError('Cannot find \'conn_mgr\' to initialize \'{}\''.format(self.name)) diff --git a/shards/tracer/factory.py b/shards/tracer/factory.py index fff7a885e4..0e54a5aeb6 100644 --- a/shards/tracer/factory.py +++ b/shards/tracer/factory.py @@ -1,50 +1,27 @@ import os import logging -from functools import partial -from utils.pluginextension import MiPluginBase as PluginBase from tracer import Tracer - +from utils.plugins import BaseMixin logger = logging.getLogger(__name__) - -here = os.path.abspath(os.path.dirname(__file__)) -get_path = partial(os.path.join, here) - PLUGIN_PACKAGE_NAME = 'tracer.plugins' -plugin_base = PluginBase(package=PLUGIN_PACKAGE_NAME, - searchpath=[get_path('./plugins')]) -class TracerFactory(object): +class TracerFactory(BaseMixin): + PLUGIN_TYPE = 'Tracer' + def __init__(self, searchpath=None): - self.plugin_package_name = PLUGIN_PACKAGE_NAME - self.tracer_map = {} - searchpath = searchpath if searchpath else [] - searchpath = [searchpath] if isinstance(searchpath, str) else searchpath - self.source = plugin_base.make_plugin_source(searchpath=searchpath, - identifier=self.__class__.__name__) + super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME) - for plugin_name in self.source.list_plugins(): - plugin = self.source.load_plugin(plugin_name) - plugin.setup(self) - - def on_plugin_setup(self, plugin_class): - name = getattr(plugin_class, 'name', plugin_class.__name__) - self.tracer_map[name.lower()] = plugin_class - - def plugin(self, name): - 
return self.tracer_map.get(name, None) - - def create(self, - tracer_type, - tracer_config, - span_decorator=None, - **kwargs): - if not tracer_type: + def create(self, class_name, **kwargs): + if not class_name: return Tracer() - plugin_class = self.plugin(tracer_type.lower()) - if not plugin_class: - raise RuntimeError('Tracer Plugin \'{}\' not installed!'.format(tracer_type)) + return super().create(class_name, **kwargs) - tracer = plugin_class.create(tracer_config, span_decorator=span_decorator, **kwargs) - return tracer + def _create(self, plugin_class, **kwargs): + plugin_config = kwargs.pop('plugin_config', None) + if not plugin_config: + raise RuntimeError('\'{}\' Plugin Config is Required!'.format(self.PLUGIN_TYPE)) + + plugin = plugin_class.Create(plugin_config=plugin_config, **kwargs) + return plugin diff --git a/shards/tracer/plugins/jaeger_factory.py b/shards/tracer/plugins/jaeger_factory.py index 7b18a86130..923f2f805d 100644 --- a/shards/tracer/plugins/jaeger_factory.py +++ b/shards/tracer/plugins/jaeger_factory.py @@ -12,10 +12,11 @@ PLUGIN_NAME = __file__ class JaegerFactory: name = 'jaeger' @classmethod - def create(cls, tracer_config, span_decorator=None, **kwargs): - tracing_config = tracer_config.TRACING_CONFIG - service_name = tracer_config.TRACING_SERVICE_NAME - validate = tracer_config.TRACING_VALIDATE + def Create(cls, plugin_config, **kwargs): + tracing_config = plugin_config.TRACING_CONFIG + span_decorator = kwargs.pop('span_decorator', None) + service_name = plugin_config.TRACING_SERVICE_NAME + validate = plugin_config.TRACING_VALIDATE config = Config(config=tracing_config, service_name=service_name, validate=validate) @@ -23,7 +24,7 @@ class JaegerFactory: tracer = config.initialize_tracer() tracer_interceptor = open_tracing_server_interceptor( tracer, - log_payloads=tracer_config.TRACING_LOG_PAYLOAD, + log_payloads=plugin_config.TRACING_LOG_PAYLOAD, span_decorator=span_decorator) return Tracer(tracer, tracer_interceptor, intercept_server) diff --git a/shards/utils/plugins/__init__.py b/shards/utils/plugins/__init__.py index 361dda66f9..633f1164a7 100644 --- a/shards/utils/plugins/__init__.py +++ b/shards/utils/plugins/__init__.py @@ -5,7 +5,8 @@ from utils.pluginextension import MiPluginBase as PluginBase class BaseMixin(object): - def __init__(self, package_name, searchpath=None): + + def __init__(self, package_name, searchpath=None): self.plugin_package_name = package_name caller_path = os.path.dirname(inspect.stack()[1][1]) get_path = partial(os.path.join, caller_path) From 8630077e8a0ef184506978e301afaa3bd0ae0b8b Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Sat, 26 Oct 2019 17:26:59 +0800 Subject: [PATCH 107/196] refactor tracer settings --- shards/mishards/.env.example | 26 +++++++++++++++----------- shards/mishards/__init__.py | 2 +- shards/mishards/settings.py | 4 ++-- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/shards/mishards/.env.example b/shards/mishards/.env.example index 0a23c0cf56..8c8e696c31 100644 --- a/shards/mishards/.env.example +++ b/shards/mishards/.env.example @@ -4,17 +4,6 @@ WOSERVER=tcp://127.0.0.1:19530 SERVER_PORT=19532 SERVER_TEST_PORT=19888 -SD_PROVIDER=Static - -SD_NAMESPACE=xp -SD_IN_CLUSTER=False -SD_POLL_INTERVAL=5 -SD_ROSERVER_POD_PATT=.*-ro-servers-.* -SD_LABEL_SELECTOR=tier=ro-servers - -SD_STATIC_HOSTS=127.0.0.1 -SD_STATIC_PORT=19530 - #SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 
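
The tracer refactor keeps the TRACING_* values in `.env` and feeds them into `jaeger_client` inside the jaeger plugin. A hedged sketch of that mapping, assuming the `jaeger-client` package is installed; the agent host and port below are only examples:

```python
# Sketch: how the TRACING_* env values are typically handed to jaeger_client
# (mirrors tracer/plugins/jaeger_factory.py; host/port here are examples).
from jaeger_client import Config

tracing_config = {
    'sampler': {
        'type': 'const',                # TRACING_SAMPLER_TYPE
        'param': 1,                     # TRACING_SAMPLER_PARAM
    },
    'local_agent': {
        'reporting_host': '127.0.0.1',  # TRACING_REPORTING_HOST
        'reporting_port': 5775,         # TRACING_REPORTING_PORT
    },
    'logging': True,
}

config = Config(config=tracing_config,
                service_name='fortest',  # TRACING_SERVICE_NAME
                validate=True)           # TRACING_VALIDATE
tracer = config.initialize_tracer()
```
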
SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False SQL_ECHO=True @@ -31,3 +20,18 @@ TRACING_SAMPLER_PARAM=1 TRACING_LOG_PAYLOAD=True #TRACING_SAMPLER_TYPE=probabilistic #TRACING_SAMPLER_PARAM=0.5 + +TRACER_PLUGIN_PATH=/tmp/plugins +# TRACER_CLASS_NAME= + +#DISCOVERY_PLUGIN_PATH= +#DISCOVERY_CLASS_NAME=kubernetes + +DISCOVERY_STATIC_HOSTS=127.0.0.1 +DISCOVERY_STATIC_PORT=19530 + +DISCOVERY_KUBERNETES_NAMESPACE=xp +DISCOVERY_KUBERNETES_POD_PATT=.*-ro-servers-.* +DISCOVERY_KUBERNETES_LABEL_SELECTOR=tier=ro-servers +DISCOVERY_KUBERNETES_POLL_INTERVAL=5 +DISCOVERY_KUBERNETES_IN_CLUSTER=False diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py index 96463caa93..b4c51cc4f5 100644 --- a/shards/mishards/__init__.py +++ b/shards/mishards/__init__.py @@ -22,7 +22,7 @@ def create_app(testing_config=None): from mishards.grpc_utils import GrpcSpanDecorator from tracer.factory import TracerFactory - tracer = TracerFactory(config.TRACING_PLUGIN_PATH).create(config.TRACING_TYPE, + tracer = TracerFactory(config.TRACER_PLUGIN_PATH).create(config.TRACER_CLASS_NAME, plugin_config=settings.TracingConfig, span_decorator=GrpcSpanDecorator()) diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py index 6935405091..09b7b0713f 100644 --- a/shards/mishards/settings.py +++ b/shards/mishards/settings.py @@ -61,8 +61,8 @@ class TracingConfig: class DefaultConfig: SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_URI') SQL_ECHO = env.bool('SQL_ECHO', False) - TRACING_PLUGIN_PATH = env.str('TRACING_PLUGIN_PATH', '') - TRACING_TYPE = env.str('TRACING_TYPE', '') + TRACER_PLUGIN_PATH = env.str('TRACER_PLUGIN_PATH', '') + TRACER_CLASS_NAME = env.str('TRACER_CLASS_NAME', '') ROUTER_PLUGIN_PATH = env.str('ROUTER_PLUGIN_PATH', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_NAME', 'FileBasedHashRingRouter') DISCOVERY_PLUGIN_PATH = env.str('DISCOVERY_PLUGIN_PATH', '') From 9f52316704ac54812bd9e8c5a22456612ccefb62 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 28 Oct 2019 10:29:21 +0800 Subject: [PATCH 108/196] change static discovery init from env --- shards/discovery/plugins/static_provider.py | 6 ++++-- shards/mishards/settings.py | 7 +------ 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/shards/discovery/plugins/static_provider.py b/shards/discovery/plugins/static_provider.py index 9bea62f2da..fca8c717db 100644 --- a/shards/discovery/plugins/static_provider.py +++ b/shards/discovery/plugins/static_provider.py @@ -5,8 +5,10 @@ if __name__ == '__main__': import logging import socket +from environs import Env logger = logging.getLogger(__name__) +env = Env() class StaticDiscovery(object): @@ -14,9 +16,9 @@ class StaticDiscovery(object): def __init__(self, config, conn_mgr, **kwargs): self.conn_mgr = conn_mgr - hosts = [config.DISCOVERY_STATIC_HOSTS] if isinstance(config.DISCOVERY_STATIC_HOSTS, str) else hosts + hosts = env.list('DISCOVERY_STATIC_HOSTS', []) + self.port = env.int('DISCOVERY_STATIC_PORT', 19530) self.hosts = [socket.gethostbyname(host) for host in hosts] - self.port = config.DISCOVERY_STATIC_PORT def start(self): for host in self.hosts: diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py index 09b7b0713f..2694cd0a1f 100644 --- a/shards/mishards/settings.py +++ b/shards/mishards/settings.py @@ -29,11 +29,6 @@ SERVER_PORT = env.int('SERVER_PORT', 19530) SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) WOSERVER = env.str('WOSERVER') -DISCOVERY_STATIC_HOSTS = env.list('DISCOVERY_STATIC_HOSTS', []) 
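
With this patch the static provider reads its own settings from the environment instead of `settings.py`; `environs` splits `DISCOVERY_STATIC_HOSTS` on commas much like the dependency-free sketch below does. The dict is a stand-in for `conn_mgr.register`; the host values are examples:

```python
# Minimal sketch of the new env-driven flow in static_provider.py:
# read hosts, resolve them, and register each pod as a tcp:// URI.
import os
import socket

os.environ.setdefault('DISCOVERY_STATIC_HOSTS', '127.0.0.1,localhost')
os.environ.setdefault('DISCOVERY_STATIC_PORT', '19530')

hosts = [h.strip() for h in os.environ['DISCOVERY_STATIC_HOSTS'].split(',') if h.strip()]
port = int(os.environ['DISCOVERY_STATIC_PORT'])

registry = {}  # stand-in for conn_mgr.register(name, uri)
for host in hosts:
    ip = socket.gethostbyname(host)
    registry[host] = 'tcp://{}:{}'.format(ip, port)

print(registry)  # e.g. {'127.0.0.1': 'tcp://127.0.0.1:19530', 'localhost': 'tcp://127.0.0.1:19530'}
```
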
-DISCOVERY_STATIC_PORT = env.int('DISCOVERY_STATIC_PORT', 19530)
-
-# TESTING_WOSERVER = env.str('TESTING_WOSERVER', 'tcp://127.0.0.1:19530')
-
 class TracingConfig:
     TRACING_SERVICE_NAME = env.str('TRACING_SERVICE_NAME', 'mishards')
@@ -72,7 +67,7 @@ class DefaultConfig:
 class TestingConfig(DefaultConfig):
     SQLALCHEMY_DATABASE_URI = env.str('SQLALCHEMY_DATABASE_TEST_URI', '')
     SQL_ECHO = env.bool('SQL_TEST_ECHO', False)
-    TRACING_TYPE = env.str('TRACING_TEST_TYPE', '')
+    TRACER_CLASS_NAME = env.str('TRACER_CLASS_TEST_NAME', '')
     ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter')

From c2400f3167412ba18b6e15cd097553da8e92c70e Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 28 Oct 2019 10:30:24 +0800
Subject: [PATCH 109/196] change all_in_one.yml for updated source changes

---
 shards/start_services.yml | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/shards/start_services.yml b/shards/start_services.yml
index 57fe061bb7..286230feeb 100644
--- a/shards/start_services.yml
+++ b/shards/start_services.yml
@@ -33,9 +33,10 @@ services:
       FROM_EXAMPLE: 'true'
       DEBUG: 'true'
       SERVER_PORT: 19531
-      WOSERVER: tcp://milvus:19530
-      SD_STATIC_HOSTS: milvus
-      TRACING_TYPE: jaeger
+      WOSERVER: tcp://milvus_wr:19530
+      DISCOVERY_CLASS_NAME: static
+      DISCOVERY_STATIC_HOSTS: milvus_wr,milvus_ro
+      TRACER_CLASS_NAME: jaeger
       TRACING_SERVICE_NAME: mishards-demo
       TRACING_REPORTING_HOST: jaeger
       TRACING_REPORTING_PORT: 5775

From 3c38ac29da21300de7a3f64766b96a5b5b973f6c Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 28 Oct 2019 10:39:57 +0800
Subject: [PATCH 110/196] update env example

---
 shards/mishards/.env.example | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/shards/mishards/.env.example b/shards/mishards/.env.example
index 8c8e696c31..c8848eaadf 100644
--- a/shards/mishards/.env.example
+++ b/shards/mishards/.env.example
@@ -12,8 +12,10 @@ SQL_ECHO=True
 SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False
 SQL_TEST_ECHO=False

+TRACER_PLUGIN_PATH=/tmp/plugins
+
 # TRACING_TEST_TYPE=jaeger
-TRACING_TYPE=jaeger
+TRACER_CLASS_NAME=jaeger
 TRACING_SERVICE_NAME=fortest
 TRACING_SAMPLER_TYPE=const
 TRACING_SAMPLER_PARAM=1
@@ -21,9 +23,6 @@ TRACING_LOG_PAYLOAD=True
 #TRACING_SAMPLER_TYPE=probabilistic
 #TRACING_SAMPLER_PARAM=0.5

-TRACER_PLUGIN_PATH=/tmp/plugins
-# TRACER_CLASS_NAME=
-
 #DISCOVERY_PLUGIN_PATH=
 #DISCOVERY_CLASS_NAME=kubernetes

From 783080968439153f3ca5889b7af793b2d730e8c2 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 28 Oct 2019 11:03:41 +0800
Subject: [PATCH 111/196] update CN tutorial

---
 shards/Tutorial_CN.md | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
index 74ddd5ef78..ef82342c6a 100644
--- a/shards/Tutorial_CN.md
+++ b/shards/Tutorial_CN.md
@@ -13,7 +13,7 @@ Milvus aims to help users achieve approximate search and analytics over massive unstructured data
 2. pip install -r requirements.txt
 3. nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
 4. sudo chown -R $USER:$USER /tmp/milvus
-5. cp mishards/.env.example to mishards/.env
+5. cp mishards/.env.example mishards/.env
 6
 7. python mishards/main.py  # mishards listens on port 19532 as configured in .env
 ```

 ### Container-based deployment
@@ -100,23 +100,25 @@ pytest --cov-report html:cov_html --cov=mishards
 ### Service discovery
 | Name | Required | Type | Default Value | Explanation |
 | --------------------------- | -------- | -------- | ------------- | ------------- |
-| SD_PROVIDER | No | string | "Kubernetes" | Service discovery provider type; currently only Static and Kubernetes are available |
-| SD_STATIC_HOSTS | No | list | [] | When **SD_PROVIDER** is **Static**, the list of service addresses, e.g. "192.168.1.188,192.168.1.190"|
-| SD_STATIC_PORT | No | int | 19530 | When **SD_PROVIDER** is **Static**, the port the hosts listen on |
-| SD_NAMESPACE | No | string | - | When **SD_PROVIDER** is **Kubernetes**, the cluster namespace |
-| SD_IN_CLUSTER | No | bool | False | When **SD_PROVIDER** is **Kubernetes**, whether service discovery runs inside the cluster |
-| SD_POLL_INTERVAL | No | int | 5 | When **SD_PROVIDER** is **Kubernetes**, how often the service list is polled, in seconds |
-| SD_ROSERVER_POD_PATT | No | string | - | When **SD_PROVIDER** is **Kubernetes**, the regex matching readable Milvus instances |
-| SD_LABEL_SELECTOR | No | string | - | When **SD_PROVIDER** is **Kubernetes**, the label selector matching readable Milvus instances |
+| DISCOVERY_PLUGIN_PATH | No | string | - | User-defined search path for service discovery plugins; the system search path is used by default|
+| DISCOVERY_CLASS_NAME | No | string | static | Class looked up under the plugin search path and instantiated. The system currently ships **static** and **kubernetes**; **static** is the default |
+| DISCOVERY_STATIC_HOSTS | No | list | [] | When **DISCOVERY_CLASS_NAME** is **static**, the list of service addresses, e.g. "192.168.1.188,192.168.1.190"|
+| DISCOVERY_STATIC_PORT | No | int | 19530 | When **DISCOVERY_CLASS_NAME** is **static**, the port the hosts listen on |
+| DISCOVERY_KUBERNETES_NAMESPACE | No | string | - | When **DISCOVERY_CLASS_NAME** is **kubernetes**, the cluster namespace |
+| DISCOVERY_KUBERNETES_IN_CLUSTER | No | bool | False | When **DISCOVERY_CLASS_NAME** is **kubernetes**, whether service discovery runs inside the cluster |
+| DISCOVERY_KUBERNETES_POLL_INTERVAL | No | int | 5 | When **DISCOVERY_CLASS_NAME** is **kubernetes**, how often the service list is polled, in seconds |
+| DISCOVERY_KUBERNETES_POD_PATT | No | string | - | When **DISCOVERY_CLASS_NAME** is **kubernetes**, the regex matching readable Milvus instances |
+| DISCOVERY_KUBERNETES_LABEL_SELECTOR | No | string | - | When **DISCOVERY_CLASS_NAME** is **kubernetes**, the label selector matching readable Milvus instances |

 ### Tracing
 | Name | Required | Type | Default Value | Explanation |
 | --------------------------- | -------- | -------- | ------------- | ------------- |
-| TRACING_TYPE | No | string | "" | Tracing backend; only Jaeger is available, disabled by default|
-| TRACING_SERVICE_NAME | No | string | "mishards" | When **TRACING_TYPE** is **Jaeger**, the tracing service name |
-| TRACING_SAMPLER_TYPE | No | string | "const" | When **TRACING_TYPE** is **Jaeger**, the sampler type |
-| TRACING_SAMPLER_PARAM | No | int | 1 | When **TRACING_TYPE** is **Jaeger**, the sampling rate |
-| TRACING_LOG_PAYLOAD | No | bool | False | When **TRACING_TYPE** is **Jaeger**, whether payloads are collected |
+| TRACER_PLUGIN_PATH | No | string | - | User-defined search path for tracing plugins; the system search path is used by default|
+| TRACER_CLASS_NAME | No | string | "" | Tracing backend; only **Jaeger** is implemented so far, disabled by default|
+| TRACING_SERVICE_NAME | No | string | "mishards" | When **TRACER_CLASS_NAME** is **Jaeger**, the tracing service name |
+| TRACING_SAMPLER_TYPE | No | string | "const" | When **TRACER_CLASS_NAME** is **Jaeger**, the sampler type |
+| TRACING_SAMPLER_PARAM | No | int | 1 | When **TRACER_CLASS_NAME** is **Jaeger**, the sampling rate |
+| TRACING_LOG_PAYLOAD | No | bool | False | When **TRACER_CLASS_NAME** is **Jaeger**, whether payloads are collected |

 ### Logging
 | Name | Required | Type | Default Value | Explanation |
 | --------------------------- | -------- | -------- | ------------- | ------------- |
@@ -128,5 +130,6 @@
 ### Routing
 | Name | Required | Type | Default Value | Explanation |
 | --------------------------- | -------- | -------- | ------------- | ------------- |
-| ROUTER_CLASS_NAME | No | string | FileBasedHashRingRouter | Class name of the request router; custom classes can be registered |
+| ROUTER_PLUGIN_PATH | No | string | - | User-defined search path for router plugins; the system search path is used by default|
+| ROUTER_CLASS_NAME | No | string | FileBasedHashRingRouter | Class name of the request router; custom classes can be registered. The system currently only ships **FileBasedHashRingRouter** |
 | ROUTER_CLASS_TEST_NAME | No | string | FileBasedHashRingRouter | Class name of the request router in the test environment; custom classes can be registered |

From 3403dcc5a88fadfa9d32c797667a42a40e14f9ea Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 28 Oct 2019 11:11:39 +0800
Subject: [PATCH 112/196] update kubernetes demo for changes

---
 shards/kubernetes_demo/milvus_configmap.yaml | 185 +++++++++++++++++++
 1 file changed, 185 insertions(+)
 create mode 100644 shards/kubernetes_demo/milvus_configmap.yaml

diff --git a/shards/kubernetes_demo/milvus_configmap.yaml b/shards/kubernetes_demo/milvus_configmap.yaml
new file mode 100644
index 0000000000..cb751c02f1
--- /dev/null
+++ b/shards/kubernetes_demo/milvus_configmap.yaml
@@ -0,0 +1,185 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: milvus-mysql-configmap
+  namespace: milvus
+data:
+  milvus_mysql_config.yml: |
+    [mysqld]
+    pid-file = /var/run/mysqld/mysqld.pid
+    socket = /var/run/mysqld/mysqld.sock
+    datadir = /data
+    log-error = /var/log/mysql/error.log # mount out to host
+    # By default we only accept connections from localhost
+    bind-address = 0.0.0.0
+    # Disabling symbolic-links is recommended to prevent assorted security risks
+    symbolic-links=0
+    character-set-server = utf8mb4
+    collation-server = utf8mb4_unicode_ci
+    init_connect='SET NAMES utf8mb4'
+    skip-character-set-client-handshake = true
+    max_connections = 1000
+    wait_timeout = 31536000
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: milvus-proxy-configmap
+  namespace: milvus
+data:
+  milvus_proxy_config.yml: |
+    DEBUG=True
+    TESTING=False
+
+    WOSERVER=tcp://milvus-wo-servers:19530
+    SERVER_PORT=19530
+
+    DISCOVERY_CLASS_NAME=kubernetes
+    DISCOVERY_KUBERNETES_NAMESPACE=milvus
+    DISCOVERY_KUBERNETES_POD_PATT=.*-ro-servers-.*
+    DISCOVERY_KUBERNETES_LABEL_SELECTOR=tier=ro-servers
+    DISCOVERY_KUBERNETES_POLL_INTERVAL=10
+    DISCOVERY_KUBERNETES_IN_CLUSTER=True
+
+    SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:milvusroot@milvus-mysql:3306/milvus?charset=utf8mb4
+    SQLALCHEMY_POOL_SIZE=50
+    SQLALCHEMY_POOL_RECYCLE=7200
+
+    LOG_PATH=/var/log/milvus
+    TIMEZONE=Asia/Shanghai
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: milvus-roserver-configmap
+  namespace: milvus
+data:
+  config.yml: |
+    server_config:
+      address: 0.0.0.0
+      port: 19530
+      mode: cluster_readonly
+
+    db_config:
+      primary_path: /var/milvus
+      backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus
+      insert_buffer_size: 2
+
+    metric_config:
+      enable_monitor: off # true is on, false is off
+
+    cache_config:
+      cpu_cache_capacity: 12 # memory pool to hold index data, unit: GB
+      cpu_cache_free_percent: 0.85
+      insert_cache_immediately: false
+      # gpu_cache_capacity: 4
+      # gpu_cache_free_percent: 0.85
+      # gpu_ids:
+      #   - 0
+
+    engine_config:
+      use_blas_threshold: 800
+
+    resource_config:
+      search_resources:
+        - gpu0
+
+  log.conf: |
+    * GLOBAL:
+        FORMAT = "%datetime | %level | %logger | %msg"
+        FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-global.log"
+        ENABLED = true
+        TO_FILE = true
+        TO_STANDARD_OUTPUT = true
+        SUBSECOND_PRECISION = 3
+        PERFORMANCE_TRACKING = false
+        MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
+    * DEBUG:
+        FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-debug.log"
+        ENABLED = true
+    * WARNING:
+        FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-warning.log"
+    * TRACE:
+        FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-trace.log"
+    * VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg" + TO_FILE = true + TO_STANDARD_OUTPUT = true + ## Error logs + * ERROR: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-error.log" + * FATAL: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-fatal.log" + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: milvus-woserver-configmap + namespace: milvus +data: + config.yml: | + server_config: + address: 0.0.0.0 + port: 19530 + mode: cluster_writable + + db_config: + primary_path: /var/milvus + backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus + insert_buffer_size: 2 + + metric_config: + enable_monitor: off # true is on, false is off + + cache_config: + cpu_cache_capacity: 2 # memory pool to hold index data, unit: GB + cpu_cache_free_percent: 0.85 + insert_cache_immediately: false + # gpu_cache_capacity: 4 + # gpu_cache_free_percent: 0.85 + # gpu_ids: + # - 0 + + engine_config: + use_blas_threshold: 800 + + resource_config: + search_resources: + - gpu0 + + + log.conf: | + * GLOBAL: + FORMAT = "%datetime | %level | %logger | %msg" + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-global.log" + ENABLED = true + TO_FILE = true + TO_STANDARD_OUTPUT = true + SUBSECOND_PRECISION = 3 + PERFORMANCE_TRACKING = false + MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB + * DEBUG: + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-debug.log" + ENABLED = true + * WARNING: + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-warning.log" + * TRACE: + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-trace.log" + * VERBOSE: + FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg" + TO_FILE = true + TO_STANDARD_OUTPUT = true + ## Error logs + * ERROR: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-error.log" + * FATAL: + ENABLED = true + FILENAME = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-fatal.log" From 4167cecc9f7bb07be61d2a363e2c2eb3d8ef18b0 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Mon, 28 Oct 2019 11:19:46 +0800 Subject: [PATCH 113/196] changes for code style check --- shards/mishards/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shards/mishards/__init__.py b/shards/mishards/__init__.py index b4c51cc4f5..a3c55c4ae3 100644 --- a/shards/mishards/__init__.py +++ b/shards/mishards/__init__.py @@ -23,8 +23,8 @@ def create_app(testing_config=None): from mishards.grpc_utils import GrpcSpanDecorator from tracer.factory import TracerFactory tracer = TracerFactory(config.TRACER_PLUGIN_PATH).create(config.TRACER_CLASS_NAME, - plugin_config=settings.TracingConfig, - span_decorator=GrpcSpanDecorator()) + plugin_config=settings.TracingConfig, + span_decorator=GrpcSpanDecorator()) from mishards.router.factory import RouterFactory router = RouterFactory(config.ROUTER_PLUGIN_PATH).create(config.ROUTER_CLASS_NAME, From 2205d0129e3f842b2eeca6cd2fbf8ebdd3d75fd8 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 31 Oct 2019 10:47:46 +0800 Subject: [PATCH 114/196] (mishards): add Makefile --- shards/Makefile | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 shards/Makefile diff --git a/shards/Makefile b/shards/Makefile new file mode 100644 index 0000000000..8c351f05e8 --- /dev/null +++ b/shards/Makefile @@ -0,0 +1,15 @@ +build: + docker build --network=host -t milvusdb/mishards . 
+push:
+	docker push milvusdb/mishards
+pull:
+	docker pull milvusdb/mishards
+deploy:
+	cd all_in_one && docker-compose -f all_in_one.yml up -d && cd -
+clean:
+	rm -rf cov_html
+	cd all_in_one && docker-compose -f all_in_one.yml down && cd -
+check_style:
+	pycodestyle --config=.
+make test:
+	pytest --cov-report html:cov_html --cov=mishards

From 084215b2489d98756943bb9bd65d462392f91294 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Thu, 31 Oct 2019 11:09:40 +0800
Subject: [PATCH 115/196] (mishards): update for makefile

---
 shards/Makefile       | 12 ++++++++----
 shards/Tutorial_CN.md | 15 +++++++++++----
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/shards/Makefile b/shards/Makefile
index 8c351f05e8..7ad724ec4c 100644
--- a/shards/Makefile
+++ b/shards/Makefile
@@ -6,10 +6,14 @@ pull:
 	docker pull milvusdb/mishards
 deploy:
 	cd all_in_one && docker-compose -f all_in_one.yml up -d && cd -
-clean:
-	rm -rf cov_html
+clean_deploy:
 	cd all_in_one && docker-compose -f all_in_one.yml down && cd -
+clean_coverage:
+	rm -rf cov_html
+clean: clean_coverage clean_deploy
-check_style:
+style:
 	pycodestyle --config=.
-make test:
+coverage:
 	pytest --cov-report html:cov_html --cov=mishards
+test:
+	pytest

diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
index ef82342c6a..0c44897aea 100644
--- a/shards/Tutorial_CN.md
+++ b/shards/Tutorial_CN.md
@@ -24,8 +24,8 @@ Milvus aims to help users achieve approximate search and analytics over massive unstructured data
 **Start**
 ```
 1. Install docker-compose
-1. cd milvus/shards/all_in_one
-2. docker-compose -f all_in_one.yml up -d  # listens on port 19531
+2. make deploy  # listens on port 19531
+3. make clean_deploy  # tear down services
 ```

 **Open the Jaeger UI**
@@ -70,12 +70,19 @@ kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0 view the logs of compute node milv
 **Run unit tests**
 ```
 1. cd milvus/shards
-2. pytest
+2. make test
 ```

 **Unit test coverage**
 ```
-pytest --cov-report html:cov_html --cov=mishards
+1. cd milvus/shards
+2. make coverage
+```
+
+**Code style check**
+```
+1. cd milvus/shards
+2. make style
 ```

 ## mishards Configuration Details

From f05f7b94df26c55015201e3441dc704bcd02373f Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Thu, 31 Oct 2019 11:11:41 +0800
Subject: [PATCH 116/196] (mishards): update for tutorial and all_in_one yml

---
 shards/Tutorial_CN.md     | 5 +++--
 shards/start_services.yml | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
index 0c44897aea..261fa64c8c 100644
--- a/shards/Tutorial_CN.md
+++ b/shards/Tutorial_CN.md
@@ -24,8 +24,9 @@ Milvus aims to help users achieve approximate search and analytics over massive unstructured data
 **Start**
 ```
 1. Install docker-compose
-2. make deploy  # listens on port 19531
-3. make clean_deploy  # tear down services
+2. make build
+3. make deploy  # listens on port 19531
+4. make clean_deploy  # tear down services
 ```

 **Open the Jaeger UI**

diff --git a/shards/start_services.yml b/shards/start_services.yml
index 286230feeb..95acdd045e 100644
--- a/shards/start_services.yml
+++ b/shards/start_services.yml
@@ -21,7 +21,7 @@ services:

   mishards:
     restart: always
-    image: registry.zilliz.com/milvus/mishards:v0.0.4
+    image: milvusdb/mishards
     ports:
       - "0.0.0.0:19530:19531"
       - "0.0.0.0:19532:19532"

From 4af986acd47417849d1ed702458c5ed189119c97 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 4 Nov 2019 09:35:49 +0800
Subject: [PATCH 117/196] (shards): remove build.sh

From 58a31cfe22341d28b4e7d183291683b034e7156b Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 4 Nov 2019 11:15:07 +0800
Subject: [PATCH 118/196] (shards): update makefile

---
 shards/Makefile                 | 18 +++++++++++++++++-
 shards/all_in_one/probe_test.py | 25 +++++++++++++++++++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)
 create mode 100644 shards/all_in_one/probe_test.py

diff --git a/shards/Makefile b/shards/Makefile
index 7ad724ec4c..b1cdecdce2 100644
--- a/shards/Makefile
+++ b/shards/Makefile
@@ -1,13 +1,29 @@
+HOST=$(or $(host),127.0.0.1)
+PORT=$(or $(port),19530)
+
 build:
 	docker build --network=host -t milvusdb/mishards .
 push:
 	docker push milvusdb/mishards
 pull:
 	docker pull milvusdb/mishards
-deploy:
+deploy: clean_deploy
 	cd all_in_one && docker-compose -f all_in_one.yml up -d && cd -
 clean_deploy:
 	cd all_in_one && docker-compose -f all_in_one.yml down && cd -
+probe_deploy:
+	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py"
+cluster:
+	cd kubernetes_demo;./start.sh baseup;./start.sh appup;cd -
+clean_cluster:
+	cd kubernetes_demo;./start.sh cleanup;cd -
+cluster_status:
+	kubectl get pods -n milvus -o wide
+probe_cluster:
+	@echo
+	$(shell kubectl get service -n milvus | grep milvus-proxy-servers | awk {'print $$4,$$5'} | awk -F"[: ]" {'print "docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c \"python all_in_one/probe_test.py --port="$$2" --host="$$1"\""'})
+probe:
+	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py --port=${PORT} --host=${HOST}"
 clean_coverage:
 	rm -rf cov_html
 clean: clean_coverage clean_deploy

diff --git a/shards/all_in_one/probe_test.py b/shards/all_in_one/probe_test.py
new file mode 100644
index 0000000000..6250465910
--- /dev/null
+++ b/shards/all_in_one/probe_test.py
@@ -0,0 +1,25 @@
+from milvus import Milvus
+
+RED = '\033[0;31m'
+GREEN = '\033[0;32m'
+ENDC = '\033[0m'
+
+
+def test(host='127.0.0.1', port=19531):
+    client = Milvus()
+    try:
+        status = client.connect(host=host, port=port)
+        if status.OK():
+            print('{}Pass: Connected{}'.format(GREEN, ENDC))
+            return 0
+        else:
+            print('{}Error: {}{}'.format(RED, status, ENDC))
+            return 1
+    except Exception as exc:
+        print('{}Error: {}{}'.format(RED, exc, ENDC))
+        return 1
+
+
+if __name__ == '__main__':
+    import fire
+    fire.Fire(test)

From 13c445ccaa2a84369f0d9068b10f063cc19a7e2e Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 4 Nov 2019 11:33:34 +0800
Subject: [PATCH 119/196] (shards): update makefile for cluster

---
 shards/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/shards/Makefile b/shards/Makefile
index b1cdecdce2..a71ef6a70c 100644
--- a/shards/Makefile
+++ b/shards/Makefile
@@ -14,7 +14,7 @@ clean_deploy:
 probe_deploy:
 	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py"
 cluster:
-	cd kubernetes_demo;./start.sh baseup;./start.sh appup;cd -
+	cd kubernetes_demo;./start.sh baseup;sleep 10;./start.sh appup;cd -
 clean_cluster:
 	cd kubernetes_demo;./start.sh cleanup;cd -

From 275462eaf93c69d7f4ce01205a8ed1d996f7ebf0 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 4 Nov 2019 11:33:51 +0800
Subject: [PATCH 120/196] (shards): update cn doc

---
 shards/Tutorial_CN.md | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
index 261fa64c8c..192a0fd285 100644
--- a/shards/Tutorial_CN.md
+++ b/shards/Tutorial_CN.md
@@ -14,8 +14,8 @@ Milvus aims to help users achieve approximate search and analytics over massive unstructured data
 3. nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
 4. sudo chown -R $USER:$USER /tmp/milvus
 5. cp mishards/.env.example mishards/.env
-6
-7. python mishards/main.py  # mishards listens on port 19532 as configured in .env
+6. python mishards/main.py  # mishards listens on port 19532 as configured in .env
+7. make probe port=19532  # health check
 ```

 ### Container-based deployment
@@ -23,10 +23,12 @@
 **Start**
 ```
+cd milvus/shards
 1. Install docker-compose
 2. make build
 3. make deploy  # listens on port 19531
 4. make clean_deploy  # tear down services
+5. make probe_deploy  # health check
 ```

 **Open the Jaeger UI**
@@ -45,19 +47,21 @@
 **Steps**
 ```
-1. cd milvus/shards/kubernetes_demo/
-2. ./start.sh allup
-3. watch -n 1 kubectl get pods -n milvus -o wide  # check pod status and wait until all pods are Running
-4. kubectl get service -n milvus  # get the EXTERNAL-IP and PORT of milvus-proxy-servers; this is the service address of the mishards cluster
+cd milvus/shards
+1. make cluster  # start the cluster
+2. make probe_cluster  # health check
+3. make clean_cluster  # shut down the cluster
 ```

 **Scale out compute instances**
 ```
+cd milvus/shards/kubernetes_demo/
 ./start.sh scale-ro-server 2 (scale compute instances to 2)
 ```

 **Scale out proxy instances**
 ```
+cd milvus/shards/kubernetes_demo/
 ./start.sh scale-proxy 2 (scale proxy instances to 2)
 ```

From 013566dec4af677d419dba4c487ea86f40fbf8c2 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Mon, 4 Nov 2019 11:34:58 +0800
Subject: [PATCH 121/196] (shards): clean cluster

---
 shards/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/shards/Makefile b/shards/Makefile
index a71ef6a70c..c8aa6127f8 100644
--- a/shards/Makefile
+++ b/shards/Makefile
@@ -26,7 +26,7 @@ probe:
 	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py --port=${PORT} --host=${HOST}"
 clean_coverage:
 	rm -rf cov_html
-clean: clean_coverage clean_deploy
+clean: clean_coverage clean_deploy clean_cluster
 style:
 	pycodestyle --config=.
 coverage:
 	pytest --cov-report html:cov_html --cov=mishards
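
`probe_test.py` above makes a single connection attempt. For scripting around `make deploy`, a retrying variant built on the same 0.5.x client calls can be useful; a hedged sketch (the retry count and interval are arbitrary choices, and 19531 is the mishards port from the all_in_one setup):

```python
# Retrying health probe using the same client API as probe_test.py.
import time
from milvus import Milvus


def wait_until_ready(host='127.0.0.1', port=19531, retries=5, interval=2.0):
    client = Milvus()
    for attempt in range(1, retries + 1):
        try:
            status = client.connect(host=host, port=port)
            if status.OK():
                return True
        except Exception as exc:
            print('attempt {}: {}'.format(attempt, exc))
        time.sleep(interval)
    return False


if __name__ == '__main__':
    # Handy right after `make deploy`, before pointing clients at mishards.
    print('ready' if wait_until_ready() else 'not ready')
```
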
From df8a018549785b5f7192367f5f183264460fafa5 Mon Sep 17 00:00:00 2001
From: "peng.xu"
Date: Wed, 6 Nov 2019 17:01:16 +0800
Subject: [PATCH 122/196] (shards): add all missing changes after cherry-pick
 from xupeng's branch

---
 shards/.dockerignore                          |  13 +
 shards/all_in_one/all_in_one.yml              |  53 +++
 shards/all_in_one/ro_server.yml               |  41 ++
 shards/kubernetes_demo/README.md              | 107 +++++
 shards/kubernetes_demo/milvus_auxiliary.yaml  |  67 ++++
 shards/kubernetes_demo/milvus_data_pvc.yaml   |  57 +++
 shards/kubernetes_demo/milvus_proxy.yaml      |  88 +++++
 shards/kubernetes_demo/milvus_rbac.yaml       |  24 ++
 .../milvus_stateful_servers.yaml              |  68 ++++
 .../kubernetes_demo/milvus_write_servers.yaml |  70 ++++
 shards/kubernetes_demo/start.sh               | 368 ++++++++++++++++++
 shards/manager.py                             |  12 -
 shards/mishards/.env.example                  |   2 +-
 shards/mishards/settings.py                   |  13 +-
 14 files changed, 958 insertions(+), 25 deletions(-)
 create mode 100644 shards/.dockerignore
 create mode 100644 shards/all_in_one/all_in_one.yml
 create mode 100644 shards/all_in_one/ro_server.yml
 create mode 100644 shards/kubernetes_demo/README.md
 create mode 100644 shards/kubernetes_demo/milvus_auxiliary.yaml
 create mode 100644 shards/kubernetes_demo/milvus_data_pvc.yaml
 create mode 100644 shards/kubernetes_demo/milvus_proxy.yaml
 create mode 100644 shards/kubernetes_demo/milvus_rbac.yaml
 create mode 100644 shards/kubernetes_demo/milvus_stateful_servers.yaml
 create mode 100644 shards/kubernetes_demo/milvus_write_servers.yaml
 create mode 100755 shards/kubernetes_demo/start.sh

diff --git a/shards/.dockerignore b/shards/.dockerignore
new file mode 100644
index 0000000000..e450610057
--- /dev/null
+++ b/shards/.dockerignore
@@ -0,0 +1,13 @@
+.git
+.gitignore
+.env
+.coverage
+.dockerignore
+cov_html/
+
+.pytest_cache
+__pycache__
+*/__pycache__
+*.md
+*.yml
+*.yaml

diff --git a/shards/all_in_one/all_in_one.yml b/shards/all_in_one/all_in_one.yml
new file mode 100644
index 0000000000..40473fe8b9
--- /dev/null
+++ b/shards/all_in_one/all_in_one.yml
@@ -0,0 +1,53 @@
+version: "2.3"
+services:
+  milvus_wr:
+    runtime: nvidia
+    restart: always
+    image: milvusdb/milvus:0.5.0-d102119-ede20b
+    volumes:
+      - /tmp/milvus/db:/opt/milvus/db
+
+  milvus_ro:
+    runtime: nvidia
+    restart: always
+    image: milvusdb/milvus:0.5.0-d102119-ede20b
+    volumes:
+      - /tmp/milvus/db:/opt/milvus/db
+      - ./ro_server.yml:/opt/milvus/conf/server_config.yaml
+
+  jaeger:
+    restart: always
+    image: jaegertracing/all-in-one:1.14
+    ports:
+      - "0.0.0.0:5775:5775/udp"
+      - "0.0.0.0:16686:16686"
+      - "0.0.0.0:9441:9441"
+    environment:
+      COLLECTOR_ZIPKIN_HTTP_PORT: 9411
+
+  mishards:
+    restart: always
+    image: milvusdb/mishards
+    ports:
+      - "0.0.0.0:19531:19531"
+      - "0.0.0.0:19532:19532"
+    volumes:
+      - /tmp/milvus/db:/tmp/milvus/db
+      # - /tmp/mishards_env:/source/mishards/.env
+    command: ["python", "mishards/main.py"]
+    environment:
+      FROM_EXAMPLE: 'true'
+      DEBUG: 'true'
+      SERVER_PORT: 19531
+      WOSERVER: tcp://milvus_wr:19530
+      DISCOVERY_CLASS_NAME: static
+      DISCOVERY_STATIC_HOSTS: milvus_wr,milvus_ro
+      TRACER_CLASS_NAME: jaeger
+      TRACING_SERVICE_NAME: mishards-demo
+      TRACING_REPORTING_HOST: jaeger
+      TRACING_REPORTING_PORT: 5775
+
+    depends_on:
+      - milvus_wr
+      - milvus_ro
+      - jaeger
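
The comment in `ro_server.yml` below documents the three legal `deploy_mode` values. A small sketch that loads such a file and sanity-checks the mode before it gets mounted into a container, assuming PyYAML is available and using an example path:

```python
# Sanity-check a generated server_config before mounting it.
# The allowed deploy_mode values come from the comment in ro_server.yml.
import yaml

ALLOWED_MODES = {'single', 'cluster_readonly', 'cluster_writable'}


def check_server_config(path='all_in_one/ro_server.yml'):
    with open(path) as f:
        conf = yaml.safe_load(f)
    mode = conf['server_config']['deploy_mode']
    if mode not in ALLOWED_MODES:
        raise ValueError('unexpected deploy_mode: {}'.format(mode))
    return mode

# e.g. check_server_config() -> 'cluster_readonly' for the read-only replica
```
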
diff --git a/shards/all_in_one/ro_server.yml b/shards/all_in_one/ro_server.yml
new file mode 100644
index 0000000000..10cf695448
--- /dev/null
+++ b/shards/all_in_one/ro_server.yml
@@ -0,0 +1,41 @@
+server_config:
+  address: 0.0.0.0          # milvus server ip address (IPv4)
+  port: 19530               # port range: 1025 ~ 65534
+  deploy_mode: cluster_readonly  # deployment type: single, cluster_readonly, cluster_writable
+  time_zone: UTC+8
+
+db_config:
+  primary_path: /opt/milvus # path used to store data and meta
+  secondary_path:           # path used to store data only, split by semicolon
+
+  backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
+                             # Keep 'dialect://:@:/', and replace other texts with real values
+                             # Replace 'dialect' with 'mysql' or 'sqlite'
+
+  insert_buffer_size: 4     # GB, maximum insert buffer size allowed
+                            # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory
+
+  preload_table:            # preload data at startup, '*' means load all tables, empty value means no preload
+                            # you can specify preload tables like this: table1,table2,table3
+
+metric_config:
+  enable_monitor: false     # enable monitoring or not
+  collector: prometheus     # prometheus
+  prometheus_config:
+    port: 8080              # port prometheus uses to fetch metrics
+
+cache_config:
+  cpu_cache_capacity: 16    # GB, CPU memory used for cache
+  cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered
+  gpu_cache_capacity: 4     # GB, GPU memory used for cache
+  gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered
+  cache_insert_data: false  # whether to load inserted data into cache
+
+engine_config:
+  use_blas_threshold: 20    # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
+                            # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
+
+resource_config:
+  search_resources:         # define the GPUs used for search computation, valid value: gpux
+    - gpu0
+  index_build_device: gpu0  # GPU used for building index

diff --git a/shards/kubernetes_demo/README.md b/shards/kubernetes_demo/README.md
new file mode 100644
index 0000000000..933fcd56a8
--- /dev/null
+++ b/shards/kubernetes_demo/README.md
@@ -0,0 +1,107 @@
+This document is a gentle introduction to Milvus Cluster that does not use hard-to-understand distributed systems concepts. It provides instructions on how to set up a cluster, test it, and operate it, without going into the details that are covered in the Milvus Cluster specification but just describing how the system behaves from the point of view of the user.
+
+However, this tutorial tries to provide information about the availability and consistency characteristics of Milvus Cluster from the point of view of the final user, stated in a simple-to-understand way.
+
+If you plan to run a serious Milvus Cluster deployment, the more formal specification is a suggested reading, even if not strictly required. However, it is a good idea to start from this document, play with Milvus Cluster for some time, and only later read the specification.
+
+## Milvus Cluster Introduction
+### Infrastructure
+* Kubernetes Cluster with Nvidia GPU nodes
+* Install Nvidia Docker in the cluster
+
+### Required Docker Registry
+* Milvus Server: ```registry.zilliz.com/milvus/engine:${version>=0.3.1}```
+* Milvus Celery Apps: ```registry.zilliz.com/milvus/celery-apps:${version>=v0.2.1}```
+
+### Cluster Ability
+* Milvus Cluster provides a way to run a Milvus installation where query requests are automatically sharded across multiple Milvus read-only nodes.
+* Milvus Cluster provides availability during partitions, that is, in practical terms, the ability to continue operations when some nodes fail or are not able to communicate.
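
Sharding across read-only nodes is what the hash-ring router provides. The toy below shows only the consistent-hashing idea, with invented node names; the real implementation is `FileBasedHashRingRouter` and is considerably more involved:

```python
# Toy consistent-hash ring illustrating how queries can be sharded across
# read-only nodes. Node names are hypothetical examples.
import bisect
import hashlib


class HashRing:
    def __init__(self, nodes, vnodes=32):
        self._keys = []
        self._map = {}
        for node in nodes:
            # Several virtual points per node smooth out the distribution.
            for i in range(vnodes):
                h = self._hash('{}#{}'.format(node, i))
                bisect.insort(self._keys, h)
                self._map[h] = node

    @staticmethod
    def _hash(value):
        return int(hashlib.md5(value.encode()).hexdigest(), 16)

    def route(self, key):
        # Walk clockwise to the first virtual point at or after the key's hash.
        h = self._hash(key)
        idx = bisect.bisect(self._keys, h) % len(self._keys)
        return self._map[self._keys[idx]]


ring = HashRing(['milvus-ro-servers-0', 'milvus-ro-servers-1'])
print(ring.route('table_1/segment_42'))  # deterministic node choice
```
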
+### Metastore
+Milvus supports 2 backend databases for deployment:
+* SQLite3: Single mode only.
+* MySQL: Single/Cluster mode
+* ETCD: `TODO`
+
+### Storage
+Milvus supports 2 backend storage options for deployment:
+* Local filesystem: Convenient for use and deployment but not reliable.
+* S3/OSS: Reliable. Needs extra configuration and an external storage service.
+
+### Message Queue
+Milvus supports various MQ backends for deployment:
+* Redis
+* RabbitMQ
+* MySQL/PG/MongoDB
+
+### Cache
+* Milvus supports `Redis` as the cache backend for deployment. To reduce system complexity, we recommend using `Redis` as the MQ backend as well.
+
+### Workflow
+* Milvus Cluster uses Celery as its workflow scheduler.
+* Milvus Cluster workflow calculation nodes can be scaled.
+* Milvus Cluster contains only 1 workflow monitor node. The monitor node detects the status of calculation nodes and provides decisions for work scheduling.
+* Milvus Cluster supports different workflow result backends, and we recommend `Redis` as the result backend for performance reasons.
+
+### Writeonly Node
+* Milvus can be configured in write-only mode.
+* Right now Milvus Cluster only provides 1 write-only node.
+
+### Readonly Node
+* Milvus can be configured in read-only mode.
+* Milvus Cluster automatically shards incoming query requests across multiple read-only nodes.
+* Milvus Cluster supports read-only node scaling.
+* Milvus Cluster provides a practical solution to avoid performance degradation during cluster rebalance.
+
+### Proxy
+* Milvus Cluster communicates with clients through a proxy.
+* Milvus Cluster supports proxy scaling.
+
+### Monitor
+* Milvus Cluster supports metrics monitoring via Prometheus.
+* Milvus Cluster supports workflow task monitoring via Flower.
+* Milvus Cluster supports cluster monitoring via all Kubernetes-ecosystem monitoring tools.
+
+## Milvus Cluster Kubernetes Resources
+### PersistentVolumeClaim
+* LOG PersistentVolume: `milvus-log-disk`
+
+### ConfigMap
+* Celery workflow configmap: `milvus-celery-configmap`::`milvus_celery_config.yml`
+* Proxy configmap: `milvus-proxy-configmap`::`milvus_proxy_config.yml`
+* Readonly nodes configmap: `milvus-roserver-configmap`::`config.yml`, `milvus-roserver-configmap`::`log.conf`
+* Write-only nodes configmap: `milvus-woserver-configmap`::`config.yml`, `milvus-woserver-configmap`::`log.conf`
+
+### Services
+* Mysql service: `milvus-mysql`
+* Redis service: `milvus-redis`
+* Proxy service: `milvus-proxy-servers`
+* Write-only servers service: `milvus-wo-servers`
+
+### StatefulSet
+* Readonly stateful servers: `milvus-ro-servers`
+
+### Deployment
+* Workflow monitor: `milvus-monitor`
+* Workflow workers: `milvus-workers`
+* Write-only servers: `milvus-wo-servers`
+* Proxy: `milvus-proxy`
+
+## Milvus Cluster Configuration
+### Write-only server:
+```milvus-woserver-configmap::config.yml:
+    server_config.mode: cluster
+    db_config.db_backend_url: mysql://${user}:${password}@milvus-mysql/${dbname}
+```
+### Readonly server:
+```milvus-roserver-configmap::config.yml:
+    server_config.mode: read_only
+    db_config.db_backend_url: mysql://${user}:${password}@milvus-mysql/${dbname}
+```
+### Celery workflow:
+```milvus-celery-configmap::milvus_celery_config.yml:
+    DB_URI=mysql+mysqlconnector://${user}:${password}@milvus-mysql/${dbname}
+```
+### Proxy workflow:
+```milvus-proxy-configmap::milvus_proxy_config.yml:
+```

diff --git a/shards/kubernetes_demo/milvus_auxiliary.yaml b/shards/kubernetes_demo/milvus_auxiliary.yaml
new file mode 100644
index 0000000000..fff27adc6f
--- /dev/null
+++ b/shards/kubernetes_demo/milvus_auxiliary.yaml
@@ -0,0 +1,67 @@
+kind: Service
+apiVersion: v1
+metadata:
name: milvus-mysql + namespace: milvus +spec: + type: ClusterIP + selector: + app: milvus + tier: mysql + ports: + - protocol: TCP + port: 3306 + targetPort: 3306 + name: mysql + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: milvus-mysql + namespace: milvus +spec: + selector: + matchLabels: + app: milvus + tier: mysql + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: mysql + spec: + containers: + - name: milvus-mysql + image: mysql:5.7 + imagePullPolicy: IfNotPresent + # lifecycle: + # postStart: + # exec: + # command: ["/bin/sh", "-c", "mysql -h milvus-mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e \"CREATE DATABASE IF NOT EXISTS ${DATABASE};\"; \ + # mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e \"GRANT ALL PRIVILEGES ON ${DATABASE}.* TO 'root'@'%';\""] + env: + - name: MYSQL_ROOT_PASSWORD + value: milvusroot + - name: DATABASE + value: milvus + ports: + - name: mysql-port + containerPort: 3306 + volumeMounts: + - name: milvus-mysql-disk + mountPath: /data + subPath: mysql + - name: milvus-mysql-configmap + mountPath: /etc/mysql/mysql.conf.d/mysqld.cnf + subPath: milvus_mysql_config.yml + + volumes: + - name: milvus-mysql-disk + persistentVolumeClaim: + claimName: milvus-mysql-disk + - name: milvus-mysql-configmap + configMap: + name: milvus-mysql-configmap diff --git a/shards/kubernetes_demo/milvus_data_pvc.yaml b/shards/kubernetes_demo/milvus_data_pvc.yaml new file mode 100644 index 0000000000..480354507d --- /dev/null +++ b/shards/kubernetes_demo/milvus_data_pvc.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-db-disk + namespace: milvus +spec: + accessModes: + - ReadWriteMany + storageClassName: default + resources: + requests: + storage: 50Gi + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-log-disk + namespace: milvus +spec: + accessModes: + - ReadWriteMany + storageClassName: default + resources: + requests: + storage: 50Gi + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-mysql-disk + namespace: milvus +spec: + accessModes: + - ReadWriteMany + storageClassName: default + resources: + requests: + storage: 50Gi + +--- + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: milvus-redis-disk + namespace: milvus +spec: + accessModes: + - ReadWriteOnce + storageClassName: default + resources: + requests: + storage: 5Gi diff --git a/shards/kubernetes_demo/milvus_proxy.yaml b/shards/kubernetes_demo/milvus_proxy.yaml new file mode 100644 index 0000000000..13916b7b2b --- /dev/null +++ b/shards/kubernetes_demo/milvus_proxy.yaml @@ -0,0 +1,88 @@ +kind: Service +apiVersion: v1 +metadata: + name: milvus-proxy-servers + namespace: milvus +spec: + type: LoadBalancer + selector: + app: milvus + tier: proxy + ports: + - name: tcp + protocol: TCP + port: 19530 + targetPort: 19530 + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: milvus-proxy + namespace: milvus +spec: + selector: + matchLabels: + app: milvus + tier: proxy + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: proxy + spec: + containers: + - name: milvus-proxy + image: milvusdb/mishards:0.1.0-rc0 + imagePullPolicy: Always + command: ["python", "mishards/main.py"] + resources: + limits: + memory: "3Gi" + cpu: "4" + requests: + memory: "2Gi" + ports: + - name: tcp + containerPort: 5000 + env: + # - name: SQL_ECHO + # value: "True" + - name: DEBUG + value: "False" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MILVUS_CLIENT + 
value: "False"
+        - name: LOG_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: LOG_PATH
+          value: /var/log/milvus
+        - name: DISCOVERY_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: DISCOVERY_KUBERNETES_POD_PATT
+          value: ".*-ro-servers-.*"
+        volumeMounts:
+          - name: milvus-proxy-configmap
+            mountPath: /source/mishards/.env
+            subPath: milvus_proxy_config.yml
+          - name: milvus-log-disk
+            mountPath: /var/log/milvus
+            subPath: proxylog
+      # imagePullSecrets:
+      #   - name: regcred
+      volumes:
+        - name: milvus-proxy-configmap
+          configMap:
+            name: milvus-proxy-configmap
+        - name: milvus-log-disk
+          persistentVolumeClaim:
+            claimName: milvus-log-disk

diff --git a/shards/kubernetes_demo/milvus_rbac.yaml b/shards/kubernetes_demo/milvus_rbac.yaml
new file mode 100644
index 0000000000..e6f302be15
--- /dev/null
+++ b/shards/kubernetes_demo/milvus_rbac.yaml
@@ -0,0 +1,24 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: pods-list
+rules:
+- apiGroups: [""]
+  resources: ["pods", "events"]
+  verbs: ["list", "get", "watch"]
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: pods-list
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: milvus
+roleRef:
+  kind: ClusterRole
+  name: pods-list
+  apiGroup: rbac.authorization.k8s.io
+---

diff --git a/shards/kubernetes_demo/milvus_stateful_servers.yaml b/shards/kubernetes_demo/milvus_stateful_servers.yaml
new file mode 100644
index 0000000000..4ff5045599
--- /dev/null
+++ b/shards/kubernetes_demo/milvus_stateful_servers.yaml
@@ -0,0 +1,68 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: milvus-ro-servers
+  namespace: milvus
+spec:
+  type: ClusterIP
+  selector:
+    app: milvus
+    tier: ro-servers
+  ports:
+    - protocol: TCP
+      port: 19530
+      targetPort: 19530
+
+---
+
+apiVersion: apps/v1beta1
+kind: StatefulSet
+metadata:
+  name: milvus-ro-servers
+  namespace: milvus
+spec:
+  serviceName: "milvus-ro-servers"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: milvus
+        tier: ro-servers
+    spec:
+      terminationGracePeriodSeconds: 11
+      containers:
+        - name: milvus-ro-server
+          image: milvusdb/milvus:0.5.0-d102119-ede20b
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 19530
+          resources:
+            limits:
+              memory: "16Gi"
+              cpu: "8.0"
+            requests:
+              memory: "14Gi"
+          volumeMounts:
+            - name: milvus-db-disk
+              mountPath: /var/milvus
+              subPath: dbdata
+            - name: milvus-roserver-configmap
+              mountPath: /opt/milvus/conf/server_config.yaml
+              subPath: config.yml
+            - name: milvus-roserver-configmap
+              mountPath: /opt/milvus/conf/log_config.conf
+              subPath: log.conf
+      # imagePullSecrets:
+      #   - name: regcred
+      # tolerations:
+      #   - key: "worker"
+      #     operator: "Equal"
+      #     value: "performance"
+      #     effect: "NoSchedule"
+      volumes:
+        - name: milvus-roserver-configmap
+          configMap:
+            name: milvus-roserver-configmap
+        - name: milvus-db-disk
+          persistentVolumeClaim:
+            claimName: milvus-db-disk

diff --git a/shards/kubernetes_demo/milvus_write_servers.yaml b/shards/kubernetes_demo/milvus_write_servers.yaml
new file mode 100644
index 0000000000..6aec4b0373
--- /dev/null
+++ b/shards/kubernetes_demo/milvus_write_servers.yaml
@@ -0,0 +1,70 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: milvus-wo-servers
+  namespace: milvus
+spec:
+  type: ClusterIP
+  selector:
+    app: milvus
+    tier: wo-servers
+  ports:
+    - protocol: TCP
+      port: 19530
+      targetPort: 19530
+
+---
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: milvus-wo-servers
+  namespace: milvus
+spec:
+  selector:
matchLabels: + app: milvus + tier: wo-servers + replicas: 1 + template: + metadata: + labels: + app: milvus + tier: wo-servers + spec: + containers: + - name: milvus-wo-server + image: milvusdb/milvus:0.5.0-d102119-ede20b + imagePullPolicy: Always + ports: + - containerPort: 19530 + resources: + limits: + memory: "5Gi" + cpu: "1.0" + requests: + memory: "4Gi" + volumeMounts: + - name: milvus-db-disk + mountPath: /var/milvus + subPath: dbdata + - name: milvus-woserver-configmap + mountPath: /opt/milvus/conf/server_config.yaml + subPath: config.yml + - name: milvus-woserver-configmap + mountPath: /opt/milvus/conf/log_config.conf + subPath: log.conf + # imagePullSecrets: + # - name: regcred + # tolerations: + # - key: "worker" + # operator: "Equal" + # value: "performance" + # effect: "NoSchedule" + volumes: + - name: milvus-woserver-configmap + configMap: + name: milvus-woserver-configmap + - name: milvus-db-disk + persistentVolumeClaim: + claimName: milvus-db-disk diff --git a/shards/kubernetes_demo/start.sh b/shards/kubernetes_demo/start.sh new file mode 100755 index 0000000000..7441aa5d70 --- /dev/null +++ b/shards/kubernetes_demo/start.sh @@ -0,0 +1,368 @@ +#!/bin/bash + +UL=`tput smul` +NOUL=`tput rmul` +BOLD=`tput bold` +NORMAL=`tput sgr0` +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +ENDC='\033[0m' + +function showHelpMessage () { + echo -e "${BOLD}Usage:${NORMAL} ${RED}$0${ENDC} [option...] {cleanup${GREEN}|${ENDC}baseup${GREEN}|${ENDC}appup${GREEN}|${ENDC}appdown${GREEN}|${ENDC}allup}" >&2 + echo + echo " -h, --help show help message" + echo " ${BOLD}cleanup, delete all resources${NORMAL}" + echo " ${BOLD}baseup, start all required base resources${NORMAL}" + echo " ${BOLD}appup, start all pods${NORMAL}" + echo " ${BOLD}appdown, remove all pods${NORMAL}" + echo " ${BOLD}allup, start all base resources and pods${NORMAL}" + echo " ${BOLD}scale-proxy, scale proxy${NORMAL}" + echo " ${BOLD}scale-ro-server, scale readonly servers${NORMAL}" + echo " ${BOLD}scale-worker, scale calculation workers${NORMAL}" +} + +function showscaleHelpMessage () { + echo -e "${BOLD}Usage:${NORMAL} ${RED}$0 $1${ENDC} [option...] 
{1|2|3|4|...}" >&2 + echo + echo " -h, --help show help message" + echo " ${BOLD}number, (int) target scale number" +} + +function PrintScaleSuccessMessage() { + echo -e "${BLUE}${BOLD}Successfully Scaled: ${1} --> ${2}${ENDC}" +} + +function PrintPodStatusMessage() { + echo -e "${BOLD}${1}${NORMAL}" +} + +timeout=60 + +function setUpMysql () { + mysqlUserName=$(kubectl describe configmap -n milvus milvus-roserver-configmap | + grep backend_url | + awk '{print $2}' | + awk '{split($0, level1, ":"); + split(level1[2], level2, "/"); + print level2[3]}') + mysqlPassword=$(kubectl describe configmap -n milvus milvus-roserver-configmap | + grep backend_url | + awk '{print $2}' | + awk '{split($0, level1, ":"); + split(level1[3], level3, "@"); + print level3[1]}') + mysqlDBName=$(kubectl describe configmap -n milvus milvus-roserver-configmap | + grep backend_url | + awk '{print $2}' | + awk '{split($0, level1, ":"); + split(level1[4], level4, "/"); + print level4[2]}') + mysqlContainer=$(kubectl get pods -n milvus | grep milvus-mysql | awk '{print $1}') + + kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "CREATE DATABASE IF NOT EXISTS $mysqlDBName;" + + checkDBExists=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '$mysqlDBName';" | grep -o $mysqlDBName | wc -l) + counter=0 + while [ $checkDBExists -lt 1 ]; do + sleep 1 + let counter=counter+1 + if [ $counter == $timeout ]; then + echo "Creating MySQL database $mysqlDBName timeout" + return 1 + fi + checkDBExists=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '$mysqlDBName';" | grep -o $mysqlDBName | wc -l) + done; + + kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "GRANT ALL PRIVILEGES ON $mysqlDBName.* TO '$mysqlUserName'@'%';" + kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "FLUSH PRIVILEGES;" + checkGrant=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SHOW GRANTS for $mysqlUserName;" | grep -o "GRANT ALL PRIVILEGES ON \`$mysqlDBName\`\.\*" | wc -l) + counter=0 + while [ $checkGrant -lt 1 ]; do + sleep 1 + let counter=counter+1 + if [ $counter == $timeout ]; then + echo "Granting all privileges on $mysqlDBName to $mysqlUserName timeout" + return 1 + fi + checkGrant=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SHOW GRANTS for $mysqlUserName;" | grep -o "GRANT ALL PRIVILEGES ON \`$mysqlDBName\`\.\*" | wc -l) + done; +} + +function checkStatefulSevers() { + stateful_replicas=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Replicas:" | awk '{print $2}') + stateful_running_pods=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Pods Status:" | awk '{print $3}') + + counter=0 + prev=$stateful_running_pods + PrintPodStatusMessage "Running milvus-ro-servers Pods: $stateful_running_pods/$stateful_replicas" + while [ $stateful_replicas != $stateful_running_pods ]; do + echo -e "${YELLOW}Wait another 1 sec --- ${counter}${ENDC}" + sleep 1; + + let counter=counter+1 + if [ $counter -eq $timeout ]; then + return 1; + fi + + stateful_running_pods=$(kubectl describe statefulset -n milvus 
milvus-ro-servers | grep "Pods Status:" | awk '{print $3}') + if [ $stateful_running_pods -ne $prev ]; then + PrintPodStatusMessage "Running milvus-ro-servers Pods: $stateful_running_pods/$stateful_replicas" + fi + prev=$stateful_running_pods + done; + return 0; +} + +function checkDeployment() { + deployment_name=$1 + replicas=$(kubectl describe deployment -n milvus $deployment_name | grep "Replicas:" | awk '{print $2}') + running=$(kubectl get pods -n milvus | grep $deployment_name | grep Running | wc -l) + + counter=0 + prev=$running + PrintPodStatusMessage "Running $deployment_name Pods: $running/$replicas" + while [ $replicas != $running ]; do + echo -e "${YELLOW}Wait another 1 sec --- ${counter}${ENDC}" + sleep 1; + + let counter=counter+1 + if [ $counter == $timeout ]; then + return 1 + fi + + running=$(kubectl get pods -n milvus | grep "$deployment_name" | grep Running | wc -l) + if [ $running -ne $prev ]; then + PrintPodStatusMessage "Running $deployment_name Pods: $running/$replicas" + fi + prev=$running + done +} + + +function startDependencies() { + kubectl apply -f milvus_data_pvc.yaml + kubectl apply -f milvus_configmap.yaml + kubectl apply -f milvus_auxiliary.yaml + + counter=0 + while [ $(kubectl get pvc -n milvus | grep Bound | wc -l) != 4 ]; do + sleep 1; + let counter=counter+1 + if [ $counter == $timeout ]; then + echo "baseup timeout" + return 1 + fi + done + checkDeployment "milvus-mysql" +} + +function startApps() { + counter=0 + errmsg="" + echo -e "${GREEN}${BOLD}Checking required resouces...${NORMAL}${ENDC}" + while [ $counter -lt $timeout ]; do + sleep 1; + if [ $(kubectl get pvc -n milvus 2>/dev/null | grep Bound | wc -l) != 4 ]; then + echo -e "${YELLOW}No pvc. Wait another sec... $counter${ENDC}"; + errmsg='No pvc'; + let counter=counter+1; + continue + fi + if [ $(kubectl get configmap -n milvus 2>/dev/null | grep milvus | wc -l) != 4 ]; then + echo -e "${YELLOW}No configmap. Wait another sec... $counter${ENDC}"; + errmsg='No configmap'; + let counter=counter+1; + continue + fi + if [ $(kubectl get ep -n milvus 2>/dev/null | grep milvus-mysql | awk '{print $2}') == "" ]; then + echo -e "${YELLOW}No mysql. Wait another sec... $counter${ENDC}"; + errmsg='No mysql'; + let counter=counter+1; + continue + fi + # if [ $(kubectl get ep -n milvus 2>/dev/null | grep milvus-redis | awk '{print $2}') == "" ]; then + # echo -e "${NORMAL}${YELLOW}No redis. Wait another sec... $counter${ENDC}"; + # errmsg='No redis'; + # let counter=counter+1; + # continue + # fi + break; + done + + if [ $counter -ge $timeout ]; then + echo -e "${RED}${BOLD}Start APP Error: $errmsg${NORMAL}${ENDC}" + exit 1; + fi + + echo -e "${GREEN}${BOLD}Setup requried database ...${NORMAL}${ENDC}" + setUpMysql + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Setup MySQL database timeout${NORMAL}${ENDC}" + exit 1 + fi + + echo -e "${GREEN}${BOLD}Start servers ...${NORMAL}${ENDC}" + kubectl apply -f milvus_stateful_servers.yaml + kubectl apply -f milvus_write_servers.yaml + + checkStatefulSevers + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Starting milvus-ro-servers timeout${NORMAL}${ENDC}" + exit 1 + fi + + checkDeployment "milvus-wo-servers" + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Starting milvus-wo-servers timeout${NORMAL}${ENDC}" + exit 1 + fi + + echo -e "${GREEN}${BOLD}Start rolebinding ...${NORMAL}${ENDC}" + kubectl apply -f milvus_rbac.yaml + + echo -e "${GREEN}${BOLD}Start proxies ...${NORMAL}${ENDC}" + kubectl apply -f milvus_proxy.yaml + + checkDeployment "milvus-proxy" + if [ $? 
-ne 0 ]; then + echo -e "${RED}${BOLD}Starting milvus-proxy timeout${NORMAL}${ENDC}" + exit 1 + fi + + # echo -e "${GREEN}${BOLD}Start flower ...${NORMAL}${ENDC}" + # kubectl apply -f milvus_flower.yaml + # checkDeployment "milvus-flower" + # if [ $? -ne 0 ]; then + # echo -e "${RED}${BOLD}Starting milvus-flower timeout${NORMAL}${ENDC}" + # exit 1 + # fi + +} + +function removeApps () { + # kubectl delete -f milvus_flower.yaml 2>/dev/null + kubectl delete -f milvus_proxy.yaml 2>/dev/null + kubectl delete -f milvus_stateful_servers.yaml 2>/dev/null + kubectl delete -f milvus_write_servers.yaml 2>/dev/null + kubectl delete -f milvus_rbac.yaml 2>/dev/null + # kubectl delete -f milvus_monitor.yaml 2>/dev/null +} + +function scaleDeployment() { + deployment_name=$1 + subcommand=$2 + des=$3 + + case $des in + -h|--help|"") + showscaleHelpMessage $subcommand + exit 3 + ;; + esac + + cur=$(kubectl get deployment -n milvus $deployment_name |grep $deployment_name |awk '{split($2, status, "/"); print status[2];}') + echo -e "${GREEN}Current Running ${BOLD}$cur ${GREEN}${deployment_name}, Scaling to ${BOLD}$des ...${ENDC}"; + scalecmd="kubectl scale deployment -n milvus ${deployment_name} --replicas=${des}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Error: ${GREEN}${scalecmd}${ENDC}" + exit 1 + fi + + checkDeployment $deployment_name + + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale ${deployment_name} timeout${NORMAL}${ENDC}" + scalecmd="kubectl scale deployment -n milvus ${deployment_name} --replicas=${cur}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Rollback Error: ${GREEN}${scalecmd}${ENDC}" + exit 2 + fi + echo -e "${BLUE}${BOLD}Scale Rollback to ${cur}${ENDC}" + exit 1 + fi + PrintScaleSuccessMessage $cur $des +} + +function scaleROServers() { + subcommand=$1 + des=$2 + case $des in + -h|--help|"") + showscaleHelpMessage $subcommand + exit 3 + ;; + esac + + cur=$(kubectl get statefulset -n milvus milvus-ro-servers |tail -n 1 |awk '{split($2, status, "/"); print status[2];}') + echo -e "${GREEN}Current Running ${BOLD}$cur ${GREEN}Readonly Servers, Scaling to ${BOLD}$des ...${ENDC}"; + scalecmd="kubectl scale sts milvus-ro-servers -n milvus --replicas=${des}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Error: ${GREEN}${scalecmd}${ENDC}" + exit 1 + fi + + checkStatefulSevers + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale milvus-ro-servers timeout${NORMAL}${ENDC}" + scalecmd="kubectl scale sts milvus-ro-servers -n milvus --replicas=${cur}" + ${scalecmd} + if [ $? -ne 0 ]; then + echo -e "${RED}${BOLD}Scale Rollback Error: ${GREEN}${scalecmd}${ENDC}" + exit 2 + fi + echo -e "${BLUE}${BOLD}Scale Rollback to ${cur}${ENDC}" + exit 1 + fi + + PrintScaleSuccessMessage $cur $des +} + + +case "$1" in + +cleanup) + kubectl delete -f . 
2>/dev/null + echo -e "${BLUE}${BOLD}All resources are removed${NORMAL}${ENDC}" + ;; + +appdown) + removeApps; + echo -e "${BLUE}${BOLD}All pods are removed${NORMAL}${ENDC}" + ;; + +baseup) + startDependencies; + echo -e "${BLUE}${BOLD}All pvc, configmap and services up${NORMAL}${ENDC}" + ;; + +appup) + startApps; + echo -e "${BLUE}${BOLD}All pods up${NORMAL}${ENDC}" + ;; + +allup) + startDependencies; + sleep 2 + startApps; + echo -e "${BLUE}${BOLD}All resources and pods up${NORMAL}${ENDC}" + ;; + +scale-ro-server) + scaleROServers $1 $2 + ;; + +scale-proxy) + scaleDeployment "milvus-proxy" $1 $2 + ;; + +-h|--help|*) + showHelpMessage + ;; + +esac diff --git a/shards/manager.py b/shards/manager.py index 666ddd377e..4157b9343e 100644 --- a/shards/manager.py +++ b/shards/manager.py @@ -1,5 +1,4 @@ import fire -from sqlalchemy import and_ from mishards import db, settings @@ -12,17 +11,6 @@ class DBHandler: def drop_all(cls): db.drop_all() - @classmethod - def fun(cls, tid): - from mishards.factories import TablesFactory, TableFilesFactory, Tables - f = db.Session.query(Tables).filter(and_( - Tables.table_id == tid, - Tables.state != Tables.TO_DELETE) - ).first() - print(f) - - # f1 = TableFilesFactory() - if __name__ == '__main__': db.init_db(settings.DefaultConfig.SQLALCHEMY_DATABASE_URI) diff --git a/shards/mishards/.env.example b/shards/mishards/.env.example index c8848eaadf..f1c812a269 100644 --- a/shards/mishards/.env.example +++ b/shards/mishards/.env.example @@ -6,7 +6,7 @@ SERVER_TEST_PORT=19888 #SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False -SQL_ECHO=True +SQL_ECHO=False #SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4 SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False diff --git a/shards/mishards/settings.py b/shards/mishards/settings.py index 2694cd0a1f..8d7361dddc 100644 --- a/shards/mishards/settings.py +++ b/shards/mishards/settings.py @@ -13,6 +13,7 @@ else: DEBUG = env.bool('DEBUG', False) +MAX_RETRY = env.int('MAX_RETRY', 3) LOG_LEVEL = env.str('LOG_LEVEL', 'DEBUG' if DEBUG else 'INFO') LOG_PATH = env.str('LOG_PATH', '/tmp/mishards') @@ -22,9 +23,6 @@ TIMEZONE = env.str('TIMEZONE', 'UTC') from utils.logger_helper import config config(LOG_LEVEL, LOG_PATH, LOG_NAME, TIMEZONE) -TIMEOUT = env.int('TIMEOUT', 60) -MAX_RETRY = env.int('MAX_RETRY', 3) - SERVER_PORT = env.int('SERVER_PORT', 19530) SERVER_TEST_PORT = env.int('SERVER_TEST_PORT', 19530) WOSERVER = env.str('WOSERVER') @@ -69,12 +67,3 @@ class TestingConfig(DefaultConfig): SQL_ECHO = env.bool('SQL_TEST_ECHO', False) TRACER_CLASS_NAME = env.str('TRACER_CLASS_TEST_NAME', '') ROUTER_CLASS_NAME = env.str('ROUTER_CLASS_TEST_NAME', 'FileBasedHashRingRouter') - - -if __name__ == '__main__': - import logging - logger = logging.getLogger(__name__) - logger.debug('DEBUG') - logger.info('INFO') - logger.warn('WARN') - logger.error('ERROR') From bf87a834dd13ef570df5fec6f38245a36ba9100d Mon Sep 17 00:00:00 2001 From: quicksilver Date: Wed, 6 Nov 2019 17:14:31 +0800 Subject: [PATCH 123/196] add travis CI --- .travis.yaml | 11 +++ ci/scripts/build.sh | 123 ++++++++++++++++++++++++++++++++ ci/travis/install_dependency.sh | 35 +++++++++ ci/travis/travis_build.sh | 24 +++++++ ci/travis/travis_env_common.sh | 10 +++ 5 files changed, 203 insertions(+) create mode 100644 .travis.yaml create mode 100755 ci/scripts/build.sh create mode 
100755 ci/travis/install_dependency.sh create mode 100644 ci/travis/travis_build.sh create mode 100644 ci/travis/travis_env_common.sh diff --git a/.travis.yaml b/.travis.yaml new file mode 100644 index 0000000000..7b008a38e4 --- /dev/null +++ b/.travis.yaml @@ -0,0 +1,11 @@ +sudo: required +dist: bionic +addons: + apt: + update: true + packages: + - cmake-3.14 +install: + - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh +script: + - $TRAVIS_BUILD_DIR/ci/travis/travis_build.sh diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh new file mode 100755 index 0000000000..fb6f94f670 --- /dev/null +++ b/ci/scripts/build.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +set -ex + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink + DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + SOURCE="$(readlink "$SOURCE")" + [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located +done +SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core" +CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build" +BUILD_TYPE="Debug" +BUILD_UNITTEST="OFF" +INSTALL_PREFIX="/opt/milvus" +BUILD_COVERAGE="OFF" +USE_JFROG_CACHE="OFF" +RUN_CPPLINT="OFF" +CUDA_COMPILER=/usr/local/cuda/bin/nvcc + +while getopts "o:t:b:ulcjh" arg +do + case $arg in + o) + INSTALL_PREFIX=$OPTARG + ;; + t) + BUILD_TYPE=$OPTARG # BUILD_TYPE + ;; + b) + CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR + ;; + u) + echo "Build and run unittest cases" ; + BUILD_UNITTEST="ON"; + ;; + l) + RUN_CPPLINT="ON" + ;; + c) + BUILD_COVERAGE="ON" + ;; + j) + USE_JFROG_CACHE="ON" + ;; + h) # help + echo " + +parameter: +-o: install prefix(default: /opt/milvus) +-t: build type(default: Debug) +-b: core code build directory +-u: building unit test options(default: OFF) +-l: run cpplint, clang-format and clang-tidy(default: OFF) +-c: code coverage(default: OFF) +-j: use jfrog cache build directory(default: OFF) +-h: help + +usage: +./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} [-u] [-l] [-c] [-j] [-h] + " + exit 0 + ;; + ?) + echo "ERROR! unknown argument" + exit 1 + ;; + esac +done + +if [[ ! -d ${CORE_BUILD_DIR} ]]; then + mkdir ${CORE_BUILD_DIR} +fi + +pushd ${CORE_BUILD_DIR} + +# remove make cache since build.sh -l use default variables +# force update the variables each time +make rebuild_cache + +CMAKE_CMD="cmake \ +-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} +-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ +-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \ +-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ +-DBUILD_COVERAGE=${BUILD_COVERAGE} \ +-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ +${MILVUS_CORE_DIR}" +echo ${CMAKE_CMD} +${CMAKE_CMD} + +if [[ ${RUN_CPPLINT} == "ON" ]]; then + # cpplint check + make lint + if [ $? -ne 0 ]; then + echo "ERROR! cpplint check failed" + exit 1 + fi + echo "cpplint check passed!" + + # clang-format check + make check-clang-format + if [ $? -ne 0 ]; then + echo "ERROR! clang-format check failed" + exit 1 + fi + echo "clang-format check passed!" + +# # clang-tidy check +# make check-clang-tidy +# if [ $? -ne 0 ]; then +# echo "ERROR! clang-tidy check failed" +# rm -f CMakeCache.txt +# exit 1 +# fi +# echo "clang-tidy check passed!" 
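+# Lint-only mode ends here; the else branch below performs the actual
+# compile (make -j8) and installs into ${INSTALL_PREFIX}.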
+else
+    # compile and build
+    make -j8 || exit 1
+    make install || exit 1
+fi
diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh
new file mode 100755
index 0000000000..bc4a972b8f
--- /dev/null
+++ b/ci/travis/install_dependency.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -ex
+
+wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
+
+sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
+
+echo "deb https://apt.repos.intel.com/mkl all main" | \
+    sudo tee /etc/apt/sources.list.d/intel-mkl.list
+
+sudo apt-get update -qq
+
+sudo apt-get install -y -q --no-install-recommends \
+    flex \
+    bison \
+    gfortran \
+    lsb-core \
+    libtool \
+    automake \
+    pkg-config \
+    libboost-filesystem-dev \
+    libboost-system-dev \
+    libboost-regex-dev \
+    intel-mkl-gnu-2019.4-243 \
+    intel-mkl-core-2019.4-243 \
+    libmysqlclient-dev \
+    clang-format-6.0 \
+    clang-tidy-6.0 \
+    lcov
+
+sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
+    /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
+
+export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64
\ No newline at end of file
diff --git a/ci/travis/travis_build.sh b/ci/travis/travis_build.sh
new file mode 100644
index 0000000000..3cde1d5a4d
--- /dev/null
+++ b/ci/travis/travis_build.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -ex
+
+source $TRAVIS_BUILD_DIR/ci/travis/travis_env_common.sh
+
+only_library_mode=no
+
+while true; do
+    case "$1" in
+        --only-library)
+            only_library_mode=yes
+            shift ;;
+        *) break ;;
+    esac
+done
+
+BUILD_COMMON_FLAGS="-t ${MILVUS_BUILD_TYPE} -o ${MILVUS_INSTALL_PREFIX} -b ${MILVUS_BUILD_DIR}"
+
+if [ $only_library_mode == "yes" ]; then
+    ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS}
+else
+    ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -u -c
+fi
\ No newline at end of file
diff --git a/ci/travis/travis_env_common.sh b/ci/travis/travis_env_common.sh
new file mode 100644
index 0000000000..ac63d2950b
--- /dev/null
+++ b/ci/travis/travis_env_common.sh
@@ -0,0 +1,10 @@
+export MILVUS_CORE_DIR=${TRAVIS_BUILD_DIR}/core
+export MILVUS_BUILD_DIR=${TRAVIS_BUILD_DIR}/core/cmake_build
+export MILVUS_INSTALL_PREFIX=/opt/milvus
+export MILVUS_TRAVIS_COVERAGE=${MILVUS_TRAVIS_COVERAGE:=0}
+
+if [ "$MILVUS_TRAVIS_COVERAGE" == "1" ]; then
+  export MILVUS_CPP_COVERAGE_FILE=${TRAVIS_BUILD_DIR}/output_new.info
+fi
+
+export MILVUS_BUILD_TYPE=${MILVUS_BUILD_TYPE:=Release}

From 191d63fc4b7f4e091c65a5b4b7ddc02a54a6d193 Mon Sep 17 00:00:00 2001
From: quicksilver
Date: Wed, 6 Nov 2019 17:23:49 +0800
Subject: [PATCH 124/196] rename travis CI file

---
 .travis.yaml => .travis.yml | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename .travis.yaml => .travis.yml (100%)

diff --git a/.travis.yaml b/.travis.yml
similarity index 100%
rename from .travis.yaml
rename to .travis.yml

From 2f571df6deac0513ea61c7c7c9a810ee94a199ab Mon Sep 17 00:00:00 2001
From: quicksilver
Date: Wed, 6 Nov 2019 17:27:10 +0800
Subject: [PATCH 125/196] disable install cmake-3.14 in travis.yml

---
 .travis.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 7b008a38e4..9abb055c52 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,8 +3,6 @@ dist: bionic
 addons:
   apt:
     update: true
-    packages:
-      - cmake-3.14
 install:
   - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh
 script:

From dc94aa22cbb69d56d3717fb65949a1372331462f Mon Sep 17 00:00:00 2001
From: quicksilver
Date: Wed, 6 Nov 2019 17:44:56 
+0800 Subject: [PATCH 126/196] setting language in travis.yml --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9abb055c52..ec3b1c41fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +language: cpp sudo: required dist: bionic addons: From 1551adf908eba47c8a05f6ac31695a4e82e225d7 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Wed, 6 Nov 2019 17:47:01 +0800 Subject: [PATCH 127/196] update ci/travis/travis_build.sh --- ci/travis/travis_build.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 ci/travis/travis_build.sh diff --git a/ci/travis/travis_build.sh b/ci/travis/travis_build.sh old mode 100644 new mode 100755 From be384f22f9e6d61fc113d1fb925705ce84d2ebcb Mon Sep 17 00:00:00 2001 From: quicksilver Date: Wed, 6 Nov 2019 17:55:30 +0800 Subject: [PATCH 128/196] update ci/scripts/build.sh --- ci/scripts/build.sh | 39 ++++----------------------------- ci/travis/install_dependency.sh | 4 ++-- 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index fb6f94f670..6276359fae 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -ex +set -e SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink @@ -76,10 +76,6 @@ fi pushd ${CORE_BUILD_DIR} -# remove make cache since build.sh -l use default variables -# force update the variables each time -make rebuild_cache - CMAKE_CMD="cmake \ -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ @@ -91,33 +87,6 @@ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} ${CMAKE_CMD} -if [[ ${RUN_CPPLINT} == "ON" ]]; then - # cpplint check - make lint - if [ $? -ne 0 ]; then - echo "ERROR! cpplint check failed" - exit 1 - fi - echo "cpplint check passed!" - - # clang-format check - make check-clang-format - if [ $? -ne 0 ]; then - echo "ERROR! clang-format check failed" - exit 1 - fi - echo "clang-format check passed!" - -# # clang-tidy check -# make check-clang-tidy -# if [ $? -ne 0 ]; then -# echo "ERROR! clang-tidy check failed" -# rm -f CMakeCache.txt -# exit 1 -# fi -# echo "clang-tidy check passed!" 
-else - # compile and build - make -j8 || exit 1 - make install || exit 1 -fi +# compile and build +make -j8 || exit 1 +make install || exit 1 diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index bc4a972b8f..cec4e9c30f 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -set -ex +set -e wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB @@ -32,4 +32,4 @@ sudo apt-get install -y -q --no-install-recommends \ sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \ /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so -export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64 \ No newline at end of file +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64 From 6809839ffc4d8d0dc59f78f208296bf4d72522a2 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Wed, 6 Nov 2019 18:02:33 +0800 Subject: [PATCH 129/196] Install cmake-3.14 on Travis CI --- ci/travis/install_dependency.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index cec4e9c30f..df4752b2ef 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -2,6 +2,9 @@ set -e +wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | + sudo tar --strip-components=1 -xz -C /usr/local + wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB From 84c47daf866a4e64e68d599e33a32d84c8403fed Mon Sep 17 00:00:00 2001 From: quicksilver Date: Wed, 6 Nov 2019 18:13:53 +0800 Subject: [PATCH 130/196] add before_install stage in .travis.yml --- .travis.yml | 2 ++ ci/travis/install_dependency.sh | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index ec3b1c41fa..2f9462e261 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,8 @@ dist: bionic addons: apt: update: true +before_install: + - wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | sudo tar --strip-components=1 -xz -C /usr/local install: - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh script: diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index df4752b2ef..cec4e9c30f 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -2,9 +2,6 @@ set -e -wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | - sudo tar --strip-components=1 -xz -C /usr/local - wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB From 81dc258a07ba9e3db341fd8f9b2029d7f1216c45 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Thu, 7 Nov 2019 10:03:55 +0800 Subject: [PATCH 131/196] add before_install stage in .travis.yml --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2f9462e261..760d2d8ace 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,8 @@ addons: apt: update: true before_install: - - wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | sudo tar --strip-components=1 -xz -C /usr/local + - wget -O cmake.sh https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh + - sudo sh cmake.sh --skip-license --exclude-subdir 
--prefix=/usr/local install: - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh script: From e7994a7c00629181ca133385ff1f10fe01dcce0a Mon Sep 17 00:00:00 2001 From: quicksilver Date: Thu, 7 Nov 2019 10:16:15 +0800 Subject: [PATCH 132/196] update cmake version --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 760d2d8ace..1f98f4ab83 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ addons: before_install: - wget -O cmake.sh https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh - sudo sh cmake.sh --skip-license --exclude-subdir --prefix=/usr/local + - export PATH="/usr/local:$PATH" install: - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh script: From 35264a1f85da02f44ba22dd01362f2d7db54869e Mon Sep 17 00:00:00 2001 From: quicksilver Date: Thu, 7 Nov 2019 10:22:25 +0800 Subject: [PATCH 133/196] update cmake version --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1f98f4ab83..df573e74f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ addons: before_install: - wget -O cmake.sh https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh - sudo sh cmake.sh --skip-license --exclude-subdir --prefix=/usr/local - - export PATH="/usr/local:$PATH" + - export PATH="$PATH:/usr/local" install: - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh script: From 32b29fe7b893b397c978ccc649348115087a8eb7 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Thu, 7 Nov 2019 10:29:00 +0800 Subject: [PATCH 134/196] update cmake version --- .travis.yml | 4 ---- core/CMakeLists.txt | 4 ++-- core/src/index/CMakeLists.txt | 2 +- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index df573e74f7..ec3b1c41fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,10 +4,6 @@ dist: bionic addons: apt: update: true -before_install: - - wget -O cmake.sh https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh - - sudo sh cmake.sh --skip-license --exclude-subdir --prefix=/usr/local - - export PATH="$PATH:/usr/local" install: - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh script: diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index a59e80a6e8..800993e1ce 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -18,7 +18,7 @@ #------------------------------------------------------------------------------- -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.12) message(STATUS "Building using CMake version: ${CMAKE_VERSION}") set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") @@ -273,4 +273,4 @@ if (${CLANG_TIDY_FOUND}) --source_dir ${CMAKE_CURRENT_SOURCE_DIR}/src ${MILVUS_LINT_QUIET}) -endif () \ No newline at end of file +endif () diff --git a/core/src/index/CMakeLists.txt b/core/src/index/CMakeLists.txt index fcb291eda2..560a73ea16 100644 --- a/core/src/index/CMakeLists.txt +++ b/core/src/index/CMakeLists.txt @@ -18,7 +18,7 @@ #------------------------------------------------------------------------------- -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.12) message(STATUS "------------------------------KNOWHERE-----------------------------------") message(STATUS "Building using CMake version: ${CMAKE_VERSION}") From 6e8f397a69c07cc62932d63664e1edd9cee7d9b5 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Thu, 7 Nov 2019 10:58:00 +0800 Subject: [PATCH 135/196] cpu version for default setting --- ci/scripts/build.sh | 8 +++++++- 1 file changed, 7 
insertions(+), 1 deletion(-) diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 6276359fae..253ee5893d 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -18,9 +18,10 @@ INSTALL_PREFIX="/opt/milvus" BUILD_COVERAGE="OFF" USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" +CPU_VERSION="ON" CUDA_COMPILER=/usr/local/cuda/bin/nvcc -while getopts "o:t:b:ulcjh" arg +while getopts "o:t:b:gulcjh" arg do case $arg in o) @@ -32,6 +33,9 @@ do b) CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR ;; + g) + CPU_VERSION="OFF"; + ;; u) echo "Build and run unittest cases" ; BUILD_UNITTEST="ON"; @@ -52,6 +56,7 @@ parameter: -o: install prefix(default: /opt/milvus) -t: build type(default: Debug) -b: core code build directory +-g: gpu version -u: building unit test options(default: OFF) -l: run cpplint, clang-format and clang-tidy(default: OFF) -c: code coverage(default: OFF) @@ -80,6 +85,7 @@ CMAKE_CMD="cmake \ -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ -DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \ +-DMILVUS_CPU_VERSION=${CPU_VERSION} \ -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ From 3845d970fe0c83aeb9cdd0ca91efc9431d6dadbc Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 7 Nov 2019 13:54:07 +0800 Subject: [PATCH 136/196] (shards): update change log for #226 --- CHANGELOG.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d46ed6070..7f6a3d37f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Please mark all change in change log and use the ticket from JIRA. ## Feature - \#12 - Pure CPU version for Milvus +- \#226 - Experimental shards middleware for Milvus ## Improvement @@ -84,7 +85,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-658 - Fix SQ8 Hybrid can't search - MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources - \#9 - Change default gpu_cache_capacity to 4 -- \#20 - C++ sdk example get grpc error +- \#20 - C++ sdk example get grpc error - \#23 - Add unittest to improve code coverage - \#31 - make clang-format failed after run build.sh -l - \#39 - Create SQ8H index hang if using github server version @@ -136,7 +137,7 @@ Please mark all change in change log and use the ticket from JIRA. - MS-635 - Add compile option to support customized faiss - MS-660 - add ubuntu_build_deps.sh - \#18 - Add all test cases - + # Milvus 0.4.0 (2019-09-12) ## Bug @@ -345,11 +346,11 @@ Please mark all change in change log and use the ticket from JIRA. 
- MS-82 - Update server startup welcome message - MS-83 - Update vecwise to Milvus - MS-77 - Performance issue of post-search action -- MS-22 - Enhancement for MemVector size control +- MS-22 - Enhancement for MemVector size control - MS-92 - Unify behavior of debug and release build - MS-98 - Install all unit test to installation directory - MS-115 - Change is_startup of metric_config switch from true to on -- MS-122 - Archive criteria config +- MS-122 - Archive criteria config - MS-124 - HasTable interface - MS-126 - Add more error code - MS-128 - Change default db path From 066952ca23ff81f0f9befbdc6273dd0b1b847132 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 7 Nov 2019 14:01:18 +0800 Subject: [PATCH 137/196] (shards): remove build.sh --- shards/build.sh | 39 --------------------------------------- 1 file changed, 39 deletions(-) delete mode 100755 shards/build.sh diff --git a/shards/build.sh b/shards/build.sh deleted file mode 100755 index fad30518f2..0000000000 --- a/shards/build.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -BOLD=`tput bold` -NORMAL=`tput sgr0` -YELLOW='\033[1;33m' -ENDC='\033[0m' - -echo -e "${BOLD}MISHARDS_REGISTRY=${MISHARDS_REGISTRY}${ENDC}" - -function build_image() { - dockerfile=$1 - remote_registry=$2 - tagged=$2 - buildcmd="docker build -t ${tagged} -f ${dockerfile} ." - echo -e "${BOLD}$buildcmd${NORMAL}" - $buildcmd - pushcmd="docker push ${remote_registry}" - echo -e "${BOLD}$pushcmd${NORMAL}" - $pushcmd - echo -e "${YELLOW}${BOLD}Image: ${remote_registry}${NORMAL}${ENDC}" -} - -case "$1" in - -all) - [[ -z $MISHARDS_REGISTRY ]] && { - echo -e "${YELLOW}Error: Please set docker registry first:${ENDC}\n\t${BOLD}export MISHARDS_REGISTRY=xxxx\n${ENDC}" - exit 1 - } - - version="" - [[ ! -z $2 ]] && version=":${2}" - build_image "Dockerfile" "${MISHARDS_REGISTRY}${version}" "${MISHARDS_REGISTRY}" - ;; -*) - echo "Usage: [option...] {base | apps}" - echo "all, Usage: build.sh all [tagname|] => {docker_registry}:\${tagname}" - ;; -esac From ca3447fae2af2a749939a18836ffde4f1d04e2a3 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 7 Nov 2019 14:06:05 +0800 Subject: [PATCH 138/196] (shards/refactor): remove README.md under kubernetes_demo --- shards/kubernetes_demo/README.md | 107 ------------------------------- 1 file changed, 107 deletions(-) delete mode 100644 shards/kubernetes_demo/README.md diff --git a/shards/kubernetes_demo/README.md b/shards/kubernetes_demo/README.md deleted file mode 100644 index 933fcd56a8..0000000000 --- a/shards/kubernetes_demo/README.md +++ /dev/null @@ -1,107 +0,0 @@ -This document is a gentle introduction to Milvus Cluster, that does not use complex to understand distributed systems concepts. It provides instructions about how to setup a cluster, test, and operate it, without going into the details that are covered in the Milvus Cluster specification but just describing how the system behaves from the point of view of the user. - -However this tutorial tries to provide information about the availability and consistency characteristics of Milvus Cluster from the point of view of the final user, stated in a simple to understand way. - -If you plan to run a serious Milvus Cluster deployment, the more formal specification is a suggested reading, even if not strictly required. However it is a good idea to start from this document, play with Milvus Cluster some time, and only later read the specification. 
- -## Milvus Cluster Introduction -### Infrastructure -* Kubenetes Cluster With Nvida GPU Node -* Install Nvida Docker in Cluster - -### Requried Docker Registry -* Milvus Server: ```registry.zilliz.com/milvus/engine:${version>=0.3.1}``` -* Milvus Celery Apps: ```registry.zilliz.com/milvus/celery-apps:${version>=v0.2.1}``` - -### Cluster Ability -* Milvus Cluster provides a way to run a Milvus installation where query requests are automatically sharded across multiple milvus readonly nodes. -* Milvus Cluster provides availability during partitions, that is in pratical terms the ability to continue the operations when some nodes fail or are not able to communicate. - -### Metastore -Milvus supports 2 backend databases for deployment: -* Splite3: Single mode only. -* MySQL: Single/Cluster mode -* ETCD: `TODO` - -### Storage -Milvus supports 2 backend storage for deployment: -* Local filesystem: Convenient for use and deployment but not reliable. -* S3 OOS: Reliable: Need extra configuration. Need external storage service. - -### Message Queue -Milvus supports various MQ backend for deployment: -* Redis -* Rabbitmq -* MySQL/PG/MongoDB - -### Cache -* Milvus supports `Redis` as Cache backend for deployment. To reduce the system complexity, we recommend to use `Redis` as MQ backend. - -### Workflow -* Milvus Cluster use Celery as workflow scheduler. -* Milvus Cluster workflow calculation node can be scaled. -* Milvus Cluster only contains 1 worflow monitor node. Monitor node detects caculation nodes status and provides decision for work scheduling. -* Milvus Cluster supports different workflow result backend and we recommend to use `Redis` as result backend for performance consideration. - -### Writeonly Node -* Milvus can be configured in write-only mode. -* Right now Milvus Cluster only provide 1 write-only node. - -### Readonly Node -* Milvus can be configured in readonly mode. -* Milvus Cluster automatically shard incoming query requests across multiple readonly nodes. -* Milvus Cluster supports readonly nodes scaling. -* Milvus Cluster provides pratical solution to avoid performance degradation during cluster rebalance. - -### Proxy -* Milvus Cluster communicates with clients by proxy. -* Milvus Cluster supports proxy scaling. - -### Monitor -* Milvus Cluster suports metrics monitoring by prometheus. -* Milvus Cluster suports workflow tasks monitoring by flower. -* Milvus Cluster suports cluster monitoring by all kubernetes ecosystem monitoring tools. 
- -## Milvus Cluster Kubernetes Resources -### PersistentVolumeClaim -* LOG PersistentVolume: `milvus-log-disk` - -### ConfigMap -* Celery workflow configmap: `milvus-celery-configmap`::`milvus_celery_config.yml` -* Proxy configmap: `milvus-proxy-configmap`::`milvus_proxy_config.yml` -* Readonly nodes configmap: `milvus-roserver-configmap`::`config.yml`, `milvus-roserver-configmap`::`log.conf` -* Write-only nodes configmap: `milvus-woserver-configmap`::`config.yml`, `milvus-woserver-configmap`::`log.conf` - -### Services -* Mysql service: `milvus-mysql` -* Redis service: `milvus-redis` -* Rroxy service: `milvus-proxy-servers` -* Write-only servers service: `milvus-wo-servers` - -### StatefulSet -* Readonly stateful servers: `milvus-ro-servers` - -### Deployment -* Worflow monitor: `milvus-monitor` -* Worflow workers: `milvus-workers` -* Write-only servers: `milvus-wo-servers` -* Proxy: `milvus-proxy` - -## Milvus Cluster Configuration -### Write-only server: -```milvus-woserver-configmap::config.yml: - server_config.mode: cluster - db_config.db_backend_url: mysql://${user}:${password}@milvus-mysql/${dbname} -``` -### Readonly server: -```milvus-roserver-configmap::config.yml: - server_config.mode: read_only - db_config.db_backend_url: mysql://\${user}:${password}@milvus-mysql/${dbname} -``` -### Celery workflow: -```milvus-celery-configmap::milvus_celery_config.yml: - DB_URI=mysql+mysqlconnector://${user}:${password}@milvus-mysql/${dbname} -``` -### Proxy workflow: -```milvus-proxy-configmap::milvus_proxy_config.yml: -``` From b7030040b0eecfd71ddcbbc848e09ca929f576a0 Mon Sep 17 00:00:00 2001 From: "peng.xu" Date: Thu, 7 Nov 2019 14:08:17 +0800 Subject: [PATCH 139/196] (shards/refactor): remove start_services.yml --- shards/start_services.yml | 46 --------------------------------------- 1 file changed, 46 deletions(-) delete mode 100644 shards/start_services.yml diff --git a/shards/start_services.yml b/shards/start_services.yml deleted file mode 100644 index 95acdd045e..0000000000 --- a/shards/start_services.yml +++ /dev/null @@ -1,46 +0,0 @@ -version: "2.3" -services: - milvus: - runtime: nvidia - restart: always - image: registry.zilliz.com/milvus/engine:branch-0.5.0-release-4316de - # ports: - # - "0.0.0.0:19530:19530" - volumes: - - /tmp/milvus/db:/opt/milvus/db - - jaeger: - restart: always - image: jaegertracing/all-in-one:1.14 - ports: - - "0.0.0.0:5775:5775/udp" - - "0.0.0.0:16686:16686" - - "0.0.0.0:9441:9441" - environment: - COLLECTOR_ZIPKIN_HTTP_PORT: 9411 - - mishards: - restart: always - image: milvusdb/mishards - ports: - - "0.0.0.0:19530:19531" - - "0.0.0.0:19532:19532" - volumes: - - /tmp/milvus/db:/tmp/milvus/db - # - /tmp/mishards_env:/source/mishards/.env - command: ["python", "mishards/main.py"] - environment: - FROM_EXAMPLE: 'true' - DEBUG: 'true' - SERVER_PORT: 19531 - WOSERVER: tcp://milvus_wr:19530 - DISCOVERY_PLUGIN_PATH: static - DISCOVERY_STATIC_HOSTS: milvus_wr,milvus_ro - TRACER_CLASS_NAME: jaeger - TRACING_SERVICE_NAME: mishards-demo - TRACING_REPORTING_HOST: jaeger - TRACING_REPORTING_PORT: 5775 - - depends_on: - - milvus - - jaeger From 3c0c4646b07f10a2f77d4bb778f954df8c7487e1 Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Thu, 7 Nov 2019 15:35:53 +0800 Subject: [PATCH 140/196] issue 229, 230 and 237 --- core/CMakeLists.txt | 24 +- core/build.sh | 16 +- core/cmake/DefineOptions.cmake | 76 ++--- core/conf/server_config.template | 9 +- core/src/CMakeLists.txt | 3 + core/src/index/CMakeLists.txt | 14 +- core/src/index/cmake/DefineOptionsCore.cmake | 19 +- 
core/src/main.cpp | 10 +- core/src/server/Config.cpp | 143 +++++---- core/src/server/Config.h | 12 + core/src/utils/StringHelpFunctions.cpp | 44 ++- core/src/utils/StringHelpFunctions.h | 5 +- core/unittest/CMakeLists.txt | 1 + core/unittest/server/test_config.cpp | 321 +++++++++++++++++++ core/unittest/server/test_util.cpp | 5 +- 15 files changed, 536 insertions(+), 166 deletions(-) diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 5c63eb39da..cd0aa8196f 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -31,16 +31,16 @@ GET_CURRENT_TIME(BUILD_TIME) string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME}) message(STATUS "Build time = ${BUILD_TIME}") -MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME) +MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME) execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) - if(GIT_BRANCH_NAME STREQUAL "") + if (GIT_BRANCH_NAME STREQUAL "") execute_process(COMMAND "git" symbolic-ref --short -q HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) - endif() -ENDMACRO (GET_GIT_BRANCH_NAME) + endif () +ENDMACRO(GET_GIT_BRANCH_NAME) GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME) message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}") -if(NOT GIT_BRANCH_NAME STREQUAL "") +if (NOT GIT_BRANCH_NAME STREQUAL "") string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME}) endif () @@ -69,7 +69,7 @@ if (MILVUS_VERSION_MAJOR STREQUAL "" OR MILVUS_VERSION_PATCH STREQUAL "") message(WARNING "Failed to determine Milvus version from git branch name") set(MILVUS_VERSION "0.6.0") -endif() +endif () message(STATUS "Build version = ${MILVUS_VERSION}") configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/config.h @ONLY) @@ -118,17 +118,17 @@ include(DefineOptions) include(BuildUtils) include(ThirdPartyPackages) -set(MILVUS_GPU_VERSION false) -if (MILVUS_CPU_VERSION) - message(STATUS "Building Milvus CPU version") - add_compile_definitions("MILVUS_CPU_VERSION") -else () +set(MILVUS_CPU_VERSION false) +if (MILVUS_GPU_VERSION) message(STATUS "Building Milvus GPU version") - set(MILVUS_GPU_VERSION true) add_compile_definitions("MILVUS_GPU_VERSION") enable_language(CUDA) find_package(CUDA 10 REQUIRED) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") +else () + message(STATUS "Building Milvus CPU version") + set(MILVUS_CPU_VERSION true) + add_compile_definitions("MILVUS_CPU_VERSION") endif () if (CMAKE_BUILD_TYPE STREQUAL "Release") diff --git a/core/build.sh b/core/build.sh index 819278b94a..e844528ad3 100755 --- a/core/build.sh +++ b/core/build.sh @@ -12,7 +12,7 @@ USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" CUSTOMIZATION="OFF" # default use ori faiss CUDA_COMPILER=/usr/local/cuda/bin/nvcc -CPU_VERSION="OFF" +GPU_VERSION="OFF" #defaults to CPU version WITH_MKL="OFF" CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}" @@ -51,7 +51,7 @@ do c) BUILD_COVERAGE="ON" ;; - g) + z) PROFILING="ON" ;; j) @@ -60,8 +60,8 @@ do x) CUSTOMIZATION="OFF" # force use ori faiss ;; - z) - CPU_VERSION="ON" + g) + GPU_VERSION="ON" ;; m) WITH_MKL="ON" @@ -77,14 +77,14 @@ parameter: -l: run cpplint, clang-format and clang-tidy(default: OFF) -r: remove previous build directory(default: OFF) -c: code coverage(default: OFF) --g: profiling(default: OFF) +-z: profiling(default: OFF) -j: use jfrog cache build directory(default: OFF) --z: build pure CPU version(default: OFF) +-g: build GPU version(default: OFF) -m: build with MKL(default: OFF) -h: help usage: -./build.sh -p \${INSTALL_PREFIX} -t 
\${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-z] [-m] [-h] +./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-h] " exit 0 ;; @@ -116,7 +116,7 @@ CMAKE_CMD="cmake \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ -DCUSTOMIZATION=${CUSTOMIZATION} \ -DFAISS_URL=${CUSTOMIZED_FAISS_URL} \ --DMILVUS_CPU_VERSION=${CPU_VERSION} \ +-DMILVUS_GPU_VERSION=${GPU_VERSION} \ -DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ ../" echo ${CMAKE_CMD} diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index c7f4f73d94..4d5647b24d 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -13,16 +13,16 @@ macro(define_option name description default) endmacro() function(list_join lst glue out) - if("${${lst}}" STREQUAL "") + if ("${${lst}}" STREQUAL "") set(${out} "" PARENT_SCOPE) return() - endif() + endif () list(GET ${lst} 0 joined) list(REMOVE_AT ${lst} 0) - foreach(item ${${lst}}) + foreach (item ${${lst}}) set(joined "${joined}${glue}${item}") - endforeach() + endforeach () set(${out} ${joined} PARENT_SCOPE) endfunction() @@ -35,15 +35,15 @@ macro(define_option_string name description default) set("${name}_OPTION_ENUM" ${ARGN}) list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM") - if(NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) + if (NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) set_property(CACHE ${name} PROPERTY STRINGS ${ARGN}) - endif() + endif () endmacro() #---------------------------------------------------------------------- -set_option_category("CPU version") +set_option_category("GPU version") -define_option(MILVUS_CPU_VERSION "Build CPU version only" OFF) +define_option(MILVUS_GPU_VERSION "Build GPU version" OFF) #---------------------------------------------------------------------- set_option_category("Thirdparty") @@ -51,11 +51,11 @@ set_option_category("Thirdparty") set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO") define_option_string(MILVUS_DEPENDENCY_SOURCE - "Method to use for acquiring MILVUS's build dependencies" - "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}" - "AUTO" - "BUNDLED" - "SYSTEM") + "Method to use for acquiring MILVUS's build dependencies" + "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}" + "AUTO" + "BUNDLED" + "SYSTEM") define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD "Show output from ExternalProjects rather than just logging to files" ON) @@ -75,14 +75,14 @@ define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON) if (MILVUS_ENABLE_PROFILING STREQUAL "ON") define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON) define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON) -endif() +endif () define_option(MILVUS_WITH_GRPC "Build with GRPC" ON) define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON) #---------------------------------------------------------------------- -if(MSVC) +if (MSVC) set_option_category("MSVC") define_option(MSVC_LINK_VERBOSE @@ -90,7 +90,7 @@ if(MSVC) OFF) define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF) -endif() +endif () #---------------------------------------------------------------------- @@ -99,9 +99,9 @@ set_option_category("Test and benchmark") unset(MILVUS_BUILD_TESTS CACHE) if (BUILD_UNIT_TEST) define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON) -else() +else () define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF) -endif(BUILD_UNIT_TEST) +endif (BUILD_UNIT_TEST) #---------------------------------------------------------------------- macro(config_summary) @@ -113,12 +113,12 
@@ macro(config_summary) message(STATUS " Generator: ${CMAKE_GENERATOR}") message(STATUS " Build type: ${CMAKE_BUILD_TYPE}") message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}") - if(${CMAKE_EXPORT_COMPILE_COMMANDS}) + if (${CMAKE_EXPORT_COMPILE_COMMANDS}) message( STATUS " Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json") - endif() + endif () - foreach(category ${MILVUS_OPTION_CATEGORIES}) + foreach (category ${MILVUS_OPTION_CATEGORIES}) message(STATUS) message(STATUS "${category} options:") @@ -126,50 +126,50 @@ macro(config_summary) set(option_names ${MILVUS_${category}_OPTION_NAMES}) set(max_value_length 0) - foreach(name ${option_names}) + foreach (name ${option_names}) string(LENGTH "\"${${name}}\"" value_length) - if(${max_value_length} LESS ${value_length}) + if (${max_value_length} LESS ${value_length}) set(max_value_length ${value_length}) - endif() - endforeach() + endif () + endforeach () - foreach(name ${option_names}) - if("${${name}_OPTION_TYPE}" STREQUAL "string") + foreach (name ${option_names}) + if ("${${name}_OPTION_TYPE}" STREQUAL "string") set(value "\"${${name}}\"") - else() + else () set(value "${${name}}") - endif() + endif () set(default ${${name}_OPTION_DEFAULT}) set(description ${${name}_OPTION_DESCRIPTION}) string(LENGTH ${description} description_length) - if(${description_length} LESS 70) + if (${description_length} LESS 70) string( SUBSTRING " " ${description_length} -1 description_padding) - else() + else () set(description_padding " ") - endif() + endif () set(comment "[${name}]") - if("${value}" STREQUAL "${default}") + if ("${value}" STREQUAL "${default}") set(comment "[default] ${comment}") - endif() + endif () - if(NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) + if (NOT ("${${name}_OPTION_ENUM}" STREQUAL "")) set(comment "${comment} [${${name}_OPTION_ENUM}]") - endif() + endif () string( SUBSTRING "${value} " 0 ${max_value_length} value) message(STATUS " ${description} ${description_padding} ${value} ${comment}") - endforeach() + endforeach () - endforeach() + endforeach () endmacro() diff --git a/core/conf/server_config.template b/core/conf/server_config.template index bee0a67b27..8fc31366e3 100644 --- a/core/conf/server_config.template +++ b/core/conf/server_config.template @@ -32,9 +32,9 @@ cache_config: cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] cache_insert_data: false # whether to load inserted data into cache, must be a boolean -# Skip the following config if you are using GPU version - gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer - gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] +#Uncomment the following config if you are using GPU version +# gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer +# gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] engine_config: use_blas_threshold: 1100 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times @@ -44,5 +44,4 @@ engine_config: resource_config: search_resources: # define the devices used for search computation, must be in format: cpu or gpux - cpu - - gpu0 - index_build_device: gpu0 # CPU / GPU used for building index, must be in format: cpu / gpux + index_build_device: cpu # CPU / GPU used for building index, must be in format: cpu / gpux diff --git 
a/core/src/CMakeLists.txt b/core/src/CMakeLists.txt index 9e4065d646..79b5e0f1da 100644 --- a/core/src/CMakeLists.txt +++ b/core/src/CMakeLists.txt @@ -24,6 +24,9 @@ include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus) add_subdirectory(index) +if (BUILD_FAISS_WITH_MKL) + add_compile_definitions("WITH_MKL") +endif () set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE) foreach (dir ${INDEX_INCLUDE_DIRS}) diff --git a/core/src/index/CMakeLists.txt b/core/src/index/CMakeLists.txt index fcb291eda2..3f00e69fce 100644 --- a/core/src/index/CMakeLists.txt +++ b/core/src/index/CMakeLists.txt @@ -22,7 +22,7 @@ cmake_minimum_required(VERSION 3.14) message(STATUS "------------------------------KNOWHERE-----------------------------------") message(STATUS "Building using CMake version: ${CMAKE_VERSION}") -set(KNOWHERE_VERSION "0.5.0") +set(KNOWHERE_VERSION "0.6.0") string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" KNOWHERE_BASE_VERSION "${KNOWHERE_VERSION}") project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES C CXX) set(CMAKE_CXX_STANDARD 14) @@ -72,17 +72,17 @@ include(ExternalProject) include(DefineOptionsCore) include(BuildUtilsCore) -set(KNOWHERE_GPU_VERSION false) -if (MILVUS_CPU_VERSION OR KNOWHERE_CPU_VERSION) - message(STATUS "Building Knowhere CPU version") - add_compile_definitions("MILVUS_CPU_VERSION") -else () +set(KNOWHERE_CPU_VERSION false) +if (MILVUS_GPU_VERSION OR KNOWHERE_GPU_VERSION) message(STATUS "Building Knowhere GPU version") add_compile_definitions("MILVUS_GPU_VERSION") - set(KNOWHERE_GPU_VERSION true) enable_language(CUDA) find_package(CUDA 10 REQUIRED) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda") +else () + message(STATUS "Building Knowhere CPU version") + set(KNOWHERE_CPU_VERSION true) + add_compile_definitions("MILVUS_CPU_VERSION") endif () include(ThirdPartyPackagesCore) diff --git a/core/src/index/cmake/DefineOptionsCore.cmake b/core/src/index/cmake/DefineOptionsCore.cmake index 99d1911d85..e49b3a779a 100644 --- a/core/src/index/cmake/DefineOptionsCore.cmake +++ b/core/src/index/cmake/DefineOptionsCore.cmake @@ -41,12 +41,12 @@ macro(define_option_string name description default) endmacro() #---------------------------------------------------------------------- -set_option_category("CPU version") +set_option_category("GPU version") -if (MILVUS_CPU_VERSION) - define_option(KNOWHERE_CPU_VERSION "Build CPU version only" ON) +if (MILVUS_GPU_VERSION) + define_option(KNOWHERE_GPU_VERSION "Build GPU version" ON) else () - define_option(KNOWHERE_CPU_VERSION "Build CPU version only" OFF) + define_option(KNOWHERE_GPU_VERSION "Build GPU version" OFF) endif () #---------------------------------------------------------------------- @@ -81,17 +81,6 @@ define_option(KNOWHERE_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON) define_option(BUILD_FAISS_WITH_MKL "Build FAISS with MKL" OFF) -#---------------------------------------------------------------------- -if (MSVC) - set_option_category("MSVC") - - define_option(MSVC_LINK_VERBOSE - "Pass verbose linking options when linking libraries and executables" - OFF) - - define_option(KNOWHERE_USE_STATIC_CRT "Build KNOWHERE with statically linked CRT" OFF) -endif () - #---------------------------------------------------------------------- set_option_category("Test and benchmark") diff --git a/core/src/main.cpp b/core/src/main.cpp index 9bb457cdda..c5b2d2dffe 100644 --- a/core/src/main.cpp +++ 
b/core/src/main.cpp @@ -51,7 +51,13 @@ print_banner() { std::cout << " /_/ /_/___/____/___/\\____/___/ " << std::endl; std::cout << std::endl; std::cout << "Welcome to Milvus!" << std::endl; - std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << std::endl; + std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << ", with " +#ifdef WITH_MKL + << "MKL" +#else + << "OpenBLAS" +#endif + << " library." << std::endl; #ifdef MILVUS_CPU_VERSION std::cout << "You are using Milvus CPU version" << std::endl; #else @@ -137,7 +143,7 @@ main(int argc, char* argv[]) { s = server.Start(); if (s.ok()) { - std::cout << "Milvus server start successfully." << std::endl; + std::cout << "Milvus server started successfully!" << std::endl; } else { goto FAIL; } diff --git a/core/src/server/Config.cpp b/core/src/server/Config.cpp index 1d87e9aa6d..f241e661c8 100644 --- a/core/src/server/Config.cpp +++ b/core/src/server/Config.cpp @@ -25,6 +25,7 @@ #include "config/YamlConfigMgr.h" #include "server/Config.h" #include "utils/CommonUtil.h" +#include "utils/StringHelpFunctions.h" #include "utils/ValidationUtil.h" namespace milvus { @@ -343,6 +344,11 @@ Config::ResetDefaultConfig() { return s; } + s = SetResourceConfigSearchResources(CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT); + if (!s.ok()) { + return s; + } + s = SetResourceConfigIndexBuildDevice(CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT); if (!s.ok()) { return s; @@ -403,8 +409,7 @@ Status Config::CheckServerConfigDeployMode(const std::string& value) { if (value != "single" && value != "cluster_readonly" && value != "cluster_writable") { return Status(SERVER_INVALID_ARGUMENT, - "server_config.deploy_mode is not one of " - "single, cluster_readonly, and cluster_writable."); + "server_config.deploy_mode is not one of single, cluster_readonly, and cluster_writable."); } return Status::OK(); } @@ -592,15 +597,15 @@ Config::CheckCacheConfigGpuCacheCapacity(const std::string& value) { return Status(SERVER_INVALID_ARGUMENT, msg); } else { uint64_t gpu_cache_capacity = std::stoi(value) * GB; - int gpu_index; - Status s = GetResourceConfigIndexBuildDevice(gpu_index); + int device_id; + Status s = GetResourceConfigIndexBuildDevice(device_id); if (!s.ok()) { return s; } size_t gpu_memory; - if (!ValidationUtil::GetGpuMemory(gpu_index, gpu_memory).ok()) { - std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(gpu_index); + if (!ValidationUtil::GetGpuMemory(device_id, gpu_memory).ok()) { + std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(device_id); return Status(SERVER_UNEXPECTED_ERROR, msg); } else if (gpu_cache_capacity >= gpu_memory) { std::string msg = "Invalid gpu cache capacity: " + value + @@ -689,29 +694,33 @@ Config::CheckResourceConfigMode(const std::string& value) { } Status -CheckGpuDevice(const std::string& value) { +CheckResource(const std::string& value) { std::string s = value; std::transform(s.begin(), s.end(), s.begin(), ::tolower); + #ifdef MILVUS_CPU_VERSION if (s != "cpu") { return Status(SERVER_INVALID_ARGUMENT, "Invalid CPU resource: " + s); } #else - const std::regex pat("gpu(\\d+)"); - std::cmatch m; - if (!std::regex_match(value.c_str(), m, pat)) { - std::string msg = "Invalid gpu device: " + value + - ". 
Possible reason: resource_config.search_resources does not match your hardware."; + const std::regex pat("cpu|gpu(\\d+)"); + std::smatch m; + if (!std::regex_match(s, m, pat)) { + std::string msg = "Invalid search resource: " + value + + ". Possible reason: resource_config.search_resources is not in the format of cpux or gpux"; return Status(SERVER_INVALID_ARGUMENT, msg); } - int32_t gpu_index = std::stoi(value.substr(3)); - if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) { - std::string msg = "Invalid gpu device: " + value + - ". Possible reason: resource_config.search_resources does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + if (s.compare(0, 3, "gpu") == 0) { + int32_t gpu_index = std::stoi(s.substr(3)); + if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) { + std::string msg = "Invalid search resource: " + value + + ". Possible reason: resource_config.search_resources does not match your hardware."; + return Status(SERVER_INVALID_ARGUMENT, msg); + } } #endif + return Status::OK(); } @@ -724,38 +733,20 @@ Config::CheckResourceConfigSearchResources(const std::vector& value return Status(SERVER_INVALID_ARGUMENT, msg); } - bool cpu_found = false, gpu_found = false; - for (auto& device : value) { - if (device == "cpu") { - cpu_found = true; - continue; + for (auto& resource : value) { + auto status = CheckResource(resource); + if (!status.ok()) { + return Status(SERVER_INVALID_ARGUMENT, status.message()); } - if (CheckGpuDevice(device).ok()) { - gpu_found = true; - } else { - std::string msg = "Invalid search resource: " + device + - ". Possible reason: resource_config.search_resources does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); - } - } - - if (cpu_found && !gpu_found) { - std::string msg = - "Invalid search resource. Possible reason: resource_config.search_resources has only CPU resource."; - return Status(SERVER_INVALID_ARGUMENT, msg); } return Status::OK(); } Status Config::CheckResourceConfigIndexBuildDevice(const std::string& value) { - // if (value == "cpu") { - // return Status::OK(); - // } - if (!CheckGpuDevice(value).ok()) { - std::string msg = "Invalid index build device: " + value + - ". 
Possible reason: resource_config.index_build_device does not match your hardware."; - return Status(SERVER_INVALID_ARGUMENT, msg); + auto status = CheckResource(value); + if (!status.ok()) { + return Status(SERVER_INVALID_ARGUMENT, status.message()); } return Status::OK(); } @@ -796,6 +787,22 @@ Config::GetConfigStr(const std::string& parent_key, const std::string& child_key return value; } +std::string +Config::GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim, + const std::string& default_value) { + std::string value; + if (!GetConfigValueInMem(parent_key, child_key, value).ok()) { + std::vector sequence = GetConfigNode(parent_key).GetSequence(child_key); + if (sequence.empty()) { + value = default_value; + } else { + server::StringHelpFunctions::MergeStringWithDelimeter(sequence, delim, value); + } + SetConfigValueInMem(parent_key, child_key, value); + } + return value; +} + Status Config::GetServerConfigAddress(std::string& value) { value = GetConfigStr(CONFIG_SERVER, CONFIG_SERVER_ADDRESS, CONFIG_SERVER_ADDRESS_DEFAULT); @@ -1019,8 +1026,10 @@ Config::GetResourceConfigMode(std::string& value) { Status Config::GetResourceConfigSearchResources(std::vector& value) { - ConfigNode resource_config = GetConfigNode(CONFIG_RESOURCE); - value = resource_config.GetSequence(CONFIG_RESOURCE_SEARCH_RESOURCES); + std::string str = + GetConfigSequenceStr(CONFIG_RESOURCE, CONFIG_RESOURCE_SEARCH_RESOURCES, + CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT); + server::StringHelpFunctions::SplitStringByDelimeter(str, CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, value); return CheckResourceConfigSearchResources(value); } @@ -1033,10 +1042,10 @@ Config::GetResourceConfigIndexBuildDevice(int32_t& value) { return s; } - if (str != "cpu") { - value = std::stoi(str.substr(3)); + if (str == "cpu") { + value = CPU_DEVICE_ID; } else { - value = -1; + value = std::stoi(str.substr(3)); } return Status::OK(); @@ -1163,7 +1172,7 @@ Config::SetMetricConfigEnableMonitor(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_ENABLE_MONITOR, value); + SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_ENABLE_MONITOR, value); return Status::OK(); } @@ -1174,7 +1183,7 @@ Config::SetMetricConfigCollector(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_COLLECTOR, value); + SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_COLLECTOR, value); return Status::OK(); } @@ -1185,7 +1194,7 @@ Config::SetMetricConfigPrometheusPort(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_PROMETHEUS_PORT, value); + SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_PROMETHEUS_PORT, value); return Status::OK(); } @@ -1197,7 +1206,7 @@ Config::SetCacheConfigCpuCacheCapacity(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CPU_CACHE_CAPACITY, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CPU_CACHE_CAPACITY, value); return Status::OK(); } @@ -1208,7 +1217,7 @@ Config::SetCacheConfigCpuCacheThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CPU_CACHE_THRESHOLD, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CPU_CACHE_THRESHOLD, value); return Status::OK(); } @@ -1219,7 +1228,7 @@ Config::SetCacheConfigGpuCacheCapacity(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_GPU_CACHE_CAPACITY, value); + 
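For clarity, the validation `CheckResource` performs can be exercised standalone. This sketch mirrors the `cpu|gpu(\d+)` pattern and the GPU ordinal parse shown above; the non-negative test stands in for `ValidationUtil::ValidateGpuIndex`, which in the real code consults the detected hardware:

```cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <regex>
#include <string>

// Mirrors CheckResource: lowercase, match cpu|gpu(\d+), then
// range-check the gpu ordinal (stubbed as non-negative here).
bool IsValidResource(std::string s) {
    std::transform(s.begin(), s.end(), s.begin(), ::tolower);
    static const std::regex pat("cpu|gpu(\\d+)");
    std::smatch m;
    if (!std::regex_match(s, m, pat)) {
        return false;
    }
    if (s.compare(0, 3, "gpu") == 0) {
        return std::stoi(s.substr(3)) >= 0;  // stand-in for ValidateGpuIndex
    }
    return true;
}

int main() {
    for (const std::string& r : {"cpu", "gpu0", "GPU1", "gup2", "gpu"}) {
        std::cout << r << " -> " << (IsValidResource(r) ? "ok" : "invalid") << '\n';
    }
    return 0;
}
```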
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_GPU_CACHE_CAPACITY, value); return Status::OK(); } @@ -1230,7 +1239,7 @@ Config::SetCacheConfigGpuCacheThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_GPU_CACHE_THRESHOLD, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_GPU_CACHE_THRESHOLD, value); return Status::OK(); } @@ -1241,7 +1250,7 @@ Config::SetCacheConfigCacheInsertData(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CACHE_INSERT_DATA, value); + SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CACHE_INSERT_DATA, value); return Status::OK(); } @@ -1253,7 +1262,7 @@ Config::SetEngineConfigUseBlasThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_USE_BLAS_THRESHOLD, value); + SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_USE_BLAS_THRESHOLD, value); return Status::OK(); } @@ -1264,7 +1273,7 @@ Config::SetEngineConfigOmpThreadNum(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_OMP_THREAD_NUM, value); + SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_OMP_THREAD_NUM, value); return Status::OK(); } @@ -1275,7 +1284,7 @@ Config::SetEngineConfigGpuSearchThreshold(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, value); + SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, value); return Status::OK(); } @@ -1287,7 +1296,21 @@ Config::SetResourceConfigMode(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_MODE, value); + SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_MODE, value); + return Status::OK(); +} + +Status +Config::SetResourceConfigSearchResources(const std::string& value) { + std::vector res_vec; + server::StringHelpFunctions::SplitStringByDelimeter(value, CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, res_vec); + + Status s = CheckResourceConfigSearchResources(res_vec); + if (!s.ok()) { + return s; + } + + SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_SEARCH_RESOURCES, value); return Status::OK(); } @@ -1298,7 +1321,7 @@ Config::SetResourceConfigIndexBuildDevice(const std::string& value) { return s; } - SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value); + SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value); return Status::OK(); } diff --git a/core/src/server/Config.h b/core/src/server/Config.h index a09939a9cb..b82614e0b9 100644 --- a/core/src/server/Config.h +++ b/core/src/server/Config.h @@ -92,12 +92,19 @@ static const char* CONFIG_RESOURCE = "resource_config"; static const char* CONFIG_RESOURCE_MODE = "mode"; static const char* CONFIG_RESOURCE_MODE_DEFAULT = "simple"; static const char* CONFIG_RESOURCE_SEARCH_RESOURCES = "search_resources"; +static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER = ","; +#ifdef MILVUS_CPU_VERSION +static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT = "cpu"; +#else +static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT = "cpu,gpu0"; +#endif static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE = "index_build_device"; #ifdef MILVUS_CPU_VERSION static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "cpu"; #else static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "gpu0"; #endif +const int32_t CPU_DEVICE_ID = -1; class Config { public: @@ -185,6 +192,9 @@ class Config { std::string GetConfigStr(const std::string& parent_key, const std::string& child_key, 
const std::string& default_value = ""); + std::string + GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim = ",", + const std::string& default_value = ""); public: /* server config */ @@ -306,6 +316,8 @@ class Config { Status SetResourceConfigMode(const std::string& value); Status + SetResourceConfigSearchResources(const std::string& value); + Status SetResourceConfigIndexBuildDevice(const std::string& value); private: diff --git a/core/src/utils/StringHelpFunctions.cpp b/core/src/utils/StringHelpFunctions.cpp index 230cc1a0ff..2db37829d6 100644 --- a/core/src/utils/StringHelpFunctions.cpp +++ b/core/src/utils/StringHelpFunctions.cpp @@ -39,39 +39,53 @@ StringHelpFunctions::TrimStringQuote(std::string& string, const std::string& qou } } -Status +void StringHelpFunctions::SplitStringByDelimeter(const std::string& str, const std::string& delimeter, std::vector& result) { if (str.empty()) { - return Status::OK(); + return; } - size_t last = 0; - size_t index = str.find_first_of(delimeter, last); - while (index != std::string::npos) { - result.emplace_back(str.substr(last, index - last)); - last = index + 1; - index = str.find_first_of(delimeter, last); + size_t prev = 0, pos = 0; + while (true) { + pos = str.find_first_of(delimeter, prev); + if (pos == std::string::npos) { + result.emplace_back(str.substr(prev)); + break; + } else { + result.emplace_back(str.substr(prev, pos - prev)); + prev = pos + 1; + } } - if (index - last > 0) { - std::string temp = str.substr(last); - result.emplace_back(temp); +} + +void +StringHelpFunctions::MergeStringWithDelimeter(const std::vector& strs, const std::string& delimeter, + std::string& result) { + if (strs.empty()) { + result = ""; + return; } - return Status::OK(); + result = strs[0]; + for (size_t i = 1; i < strs.size(); i++) { + result = result + delimeter + strs[i]; + } } Status StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::string& delimeter, const std::string& quote, std::vector& result) { if (quote.empty()) { - return SplitStringByDelimeter(str, delimeter, result); + SplitStringByDelimeter(str, delimeter, result); + return Status::OK(); } size_t last = 0; size_t index = str.find_first_of(quote, last); if (index == std::string::npos) { - return SplitStringByDelimeter(str, delimeter, result); + SplitStringByDelimeter(str, delimeter, result); + return Status::OK(); } std::string process_str = str; @@ -116,7 +130,7 @@ StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::strin } if (!process_str.empty()) { - return SplitStringByDelimeter(process_str, delimeter, result); + SplitStringByDelimeter(process_str, delimeter, result); } return Status::OK(); diff --git a/core/src/utils/StringHelpFunctions.h b/core/src/utils/StringHelpFunctions.h index cb355332f1..0cb303bb4f 100644 --- a/core/src/utils/StringHelpFunctions.h +++ b/core/src/utils/StringHelpFunctions.h @@ -43,9 +43,12 @@ class StringHelpFunctions { // ,b, | b | // ,, | | // a a - static Status + static void SplitStringByDelimeter(const std::string& str, const std::string& delimeter, std::vector& result); + static void + MergeStringWithDelimeter(const std::vector& strs, const std::string& delimeter, std::string& result); + // assume the table has two columns, quote='\"', delimeter=',' // a,b a | b // "aa,gg,yy",b aa,gg,yy | b diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt index 7bcc21f7ee..e485bd729a 100644 --- a/core/unittest/CMakeLists.txt +++ 
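With the rewrite above, `SplitStringByDelimeter` now also yields the empty tokens implied by leading and trailing delimiters, and the new `MergeStringWithDelimeter` is its inverse. A standalone sketch of those semantics (free functions `Split`/`Merge` here are stand-ins for the class methods):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Split on any character in delim; ",b," yields "", "b", "",
// matching the table documented in StringHelpFunctions.h.
void Split(const std::string& str, const std::string& delim, std::vector<std::string>& out) {
    if (str.empty()) return;
    size_t prev = 0;
    while (true) {
        size_t pos = str.find_first_of(delim, prev);
        if (pos == std::string::npos) {
            out.emplace_back(str.substr(prev));
            break;
        }
        out.emplace_back(str.substr(prev, pos - prev));
        prev = pos + 1;
    }
}

// Join with delim; the inverse of Split for single-character delimiters.
std::string Merge(const std::vector<std::string>& parts, const std::string& delim) {
    if (parts.empty()) return "";
    std::string result = parts[0];
    for (size_t i = 1; i < parts.size(); i++) {
        result += delim + parts[i];
    }
    return result;
}

int main() {
    std::vector<std::string> parts;
    Split(",b,", ",", parts);                  // "", "b", ""
    std::cout << parts.size() << " tokens\n";  // 3
    std::cout << Merge(parts, ",") << '\n';    // ,b,
    return 0;
}
```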
b/core/unittest/CMakeLists.txt @@ -77,6 +77,7 @@ set(helper_files ${MILVUS_ENGINE_SRC}/utils/CommonUtil.cpp ${MILVUS_ENGINE_SRC}/utils/TimeRecorder.cpp ${MILVUS_ENGINE_SRC}/utils/Status.cpp + ${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp ${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc ) diff --git a/core/unittest/server/test_config.cpp b/core/unittest/server/test_config.cpp index caaa66f979..5e6f61e543 100644 --- a/core/unittest/server/test_config.cpp +++ b/core/unittest/server/test_config.cpp @@ -22,6 +22,7 @@ #include "server/Config.h" #include "server/utils.h" #include "utils/CommonUtil.h" +#include "utils/StringHelpFunctions.h" #include "utils/ValidationUtil.h" namespace { @@ -98,6 +99,326 @@ TEST_F(ConfigTest, CONFIG_TEST) { ASSERT_TRUE(seqs.empty()); } +TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) { + std::string config_path(CONFIG_PATH); + milvus::server::Config& config = milvus::server::Config::GetInstance(); + milvus::Status s; + std::string str_val; + int32_t int32_val; + int64_t int64_val; + float float_val; + bool bool_val; + + /* server config */ + std::string server_addr = "192.168.1.155"; + s = config.SetServerConfigAddress(server_addr); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigAddress(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_addr); + + std::string server_port = "12345"; + s = config.SetServerConfigPort(server_port); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigPort(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_port); + + std::string server_mode = "cluster_readonly"; + s = config.SetServerConfigDeployMode(server_mode); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigDeployMode(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_mode); + + std::string server_time_zone = "UTC+6"; + s = config.SetServerConfigTimeZone(server_time_zone); + ASSERT_TRUE(s.ok()); + s = config.GetServerConfigTimeZone(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == server_time_zone); + + /* db config */ + std::string db_primary_path = "/home/zilliz"; + s = config.SetDBConfigPrimaryPath(db_primary_path); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigPrimaryPath(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == db_primary_path); + + std::string db_secondary_path = "/home/zilliz"; + s = config.SetDBConfigSecondaryPath(db_secondary_path); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigSecondaryPath(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == db_secondary_path); + + std::string db_backend_url = "mysql://root:123456@127.0.0.1:19530/milvus"; + s = config.SetDBConfigBackendUrl(db_backend_url); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigBackendUrl(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == db_backend_url); + + int32_t db_archive_disk_threshold = 100; + s = config.SetDBConfigArchiveDiskThreshold(std::to_string(db_archive_disk_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigArchiveDiskThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == db_archive_disk_threshold); + + int32_t db_archive_days_threshold = 365; + s = config.SetDBConfigArchiveDaysThreshold(std::to_string(db_archive_days_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetDBConfigArchiveDaysThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == db_archive_days_threshold); + + int32_t db_insert_buffer_size = 2; + s = config.SetDBConfigInsertBufferSize(std::to_string(db_insert_buffer_size)); + ASSERT_TRUE(s.ok()); + s 
= config.GetDBConfigInsertBufferSize(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == db_insert_buffer_size); + + /* metric config */ + bool metric_enable_monitor = false; + s = config.SetMetricConfigEnableMonitor(std::to_string(metric_enable_monitor)); + ASSERT_TRUE(s.ok()); + s = config.GetMetricConfigEnableMonitor(bool_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(bool_val == metric_enable_monitor); + + std::string metric_collector = "prometheus"; + s = config.SetMetricConfigCollector(metric_collector); + ASSERT_TRUE(s.ok()); + s = config.GetMetricConfigCollector(str_val); + ASSERT_TRUE(str_val == metric_collector); + + std::string metric_prometheus_port = "2222"; + s = config.SetMetricConfigPrometheusPort(metric_prometheus_port); + ASSERT_TRUE(s.ok()); + s = config.GetMetricConfigPrometheusPort(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == metric_prometheus_port); + + /* cache config */ + int64_t cache_cpu_cache_capacity = 5; + s = config.SetCacheConfigCpuCacheCapacity(std::to_string(cache_cpu_cache_capacity)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigCpuCacheCapacity(int64_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int64_val == cache_cpu_cache_capacity); + + float cache_cpu_cache_threshold = 0.1; + s = config.SetCacheConfigCpuCacheThreshold(std::to_string(cache_cpu_cache_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigCpuCacheThreshold(float_val); + ASSERT_TRUE(float_val == cache_cpu_cache_threshold); + + int64_t cache_gpu_cache_capacity = 1; + s = config.SetCacheConfigGpuCacheCapacity(std::to_string(cache_gpu_cache_capacity)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigGpuCacheCapacity(int64_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int64_val == cache_gpu_cache_capacity); + + float cache_gpu_cache_threshold = 0.2; + s = config.SetCacheConfigGpuCacheThreshold(std::to_string(cache_gpu_cache_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigGpuCacheThreshold(float_val); + ASSERT_TRUE(float_val == cache_gpu_cache_threshold); + + bool cache_insert_data = true; + s = config.SetCacheConfigCacheInsertData(std::to_string(cache_insert_data)); + ASSERT_TRUE(s.ok()); + s = config.GetCacheConfigCacheInsertData(bool_val); + ASSERT_TRUE(bool_val == cache_insert_data); + + /* engine config */ + int32_t engine_use_blas_threshold = 50; + s = config.SetEngineConfigUseBlasThreshold(std::to_string(engine_use_blas_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetEngineConfigUseBlasThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == engine_use_blas_threshold); + + int32_t engine_omp_thread_num = 8; + s = config.SetEngineConfigOmpThreadNum(std::to_string(engine_omp_thread_num)); + ASSERT_TRUE(s.ok()); + s = config.GetEngineConfigOmpThreadNum(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == engine_omp_thread_num); + + int32_t engine_gpu_search_threshold = 800; + s = config.SetEngineConfigGpuSearchThreshold(std::to_string(engine_gpu_search_threshold)); + ASSERT_TRUE(s.ok()); + s = config.GetEngineConfigGpuSearchThreshold(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == engine_gpu_search_threshold); + + /* resource config */ + std::string resource_mode = "simple"; + s = config.SetResourceConfigMode(resource_mode); + ASSERT_TRUE(s.ok()); + s = config.GetResourceConfigMode(str_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(str_val == resource_mode); + +#ifdef MILVUS_CPU_VERSION + std::vector search_resources = {"cpu"}; +#else + std::vector search_resources = {"cpu", "gpu0"}; +#endif + std::vector 
res_vec; + std::string res_str; + milvus::server::StringHelpFunctions::MergeStringWithDelimeter( + search_resources, milvus::server::CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, res_str); + s = config.SetResourceConfigSearchResources(res_str); + ASSERT_TRUE(s.ok()); + s = config.GetResourceConfigSearchResources(res_vec); + ASSERT_TRUE(s.ok()); + for (size_t i = 0; i < search_resources.size(); i++) { + ASSERT_TRUE(search_resources[i] == res_vec[i]); + } + +#ifdef MILVUS_CPU_VERSION + int32_t resource_index_build_device = milvus::server::CPU_DEVICE_ID; + s = config.SetResourceConfigIndexBuildDevice("cpu"); +#else + int32_t resource_index_build_device = 0; + s = config.SetResourceConfigIndexBuildDevice("gpu" + std::to_string(resource_index_build_device)); +#endif + ASSERT_TRUE(s.ok()); + s = config.GetResourceConfigIndexBuildDevice(int32_val); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(int32_val == resource_index_build_device); +} + +TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) { + std::string config_path(CONFIG_PATH); + milvus::server::Config& config = milvus::server::Config::GetInstance(); + milvus::Status s; + + s = config.LoadConfigFile(""); + ASSERT_FALSE(s.ok()); + + s = config.LoadConfigFile(config_path + INVALID_CONFIG_FILE); + ASSERT_FALSE(s.ok()); + s = config.LoadConfigFile(config_path + "dummy.yaml"); + ASSERT_FALSE(s.ok()); + + /* server config */ + s = config.SetServerConfigAddress("0.0.0"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigAddress("0.0.0.256"); + ASSERT_FALSE(s.ok()); + + s = config.SetServerConfigPort("a"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigPort("99999"); + ASSERT_FALSE(s.ok()); + + s = config.SetServerConfigDeployMode("cluster"); + ASSERT_FALSE(s.ok()); + + s = config.SetServerConfigTimeZone("GM"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigTimeZone("GMT8"); + ASSERT_FALSE(s.ok()); + s = config.SetServerConfigTimeZone("UTCA"); + ASSERT_FALSE(s.ok()); + + /* db config */ + s = config.SetDBConfigPrimaryPath(""); + ASSERT_FALSE(s.ok()); + + // s = config.SetDBConfigSecondaryPath(""); + // ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigBackendUrl("http://www.google.com"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigBackendUrl("sqlite://:@:"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigBackendUrl("mysql://root:123456@127.0.0.1/milvus"); + ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigArchiveDiskThreshold("0x10"); + ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigArchiveDaysThreshold("0x10"); + ASSERT_FALSE(s.ok()); + + s = config.SetDBConfigInsertBufferSize("a"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigInsertBufferSize("0"); + ASSERT_FALSE(s.ok()); + s = config.SetDBConfigInsertBufferSize("2048"); + ASSERT_FALSE(s.ok()); + + /* metric config */ + s = config.SetMetricConfigEnableMonitor("Y"); + ASSERT_FALSE(s.ok()); + + s = config.SetMetricConfigCollector("zilliz"); + ASSERT_FALSE(s.ok()); + + s = config.SetMetricConfigPrometheusPort("0xff"); + ASSERT_FALSE(s.ok()); + + /* cache config */ + s = config.SetCacheConfigCpuCacheCapacity("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigCpuCacheCapacity("0"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigCpuCacheCapacity("2048"); + ASSERT_FALSE(s.ok()); + + s = config.SetCacheConfigCpuCacheThreshold("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigCpuCacheThreshold("1.0"); + ASSERT_FALSE(s.ok()); + +#ifdef MILVUS_GPU_VERSION + s = config.SetCacheConfigGpuCacheCapacity("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigGpuCacheCapacity("128"); + 
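A side note on these tests: `std::to_string` has no `bool` overload, so flags such as `metric_enable_monitor` promote to `int` and reach the string-based setters as "0" or "1", which is what the boolean validator accepts (the valid-config test above passes exactly this). A two-line check:

```cpp
#include <iostream>
#include <string>

int main() {
    bool metric_enable_monitor = false;
    std::cout << std::to_string(metric_enable_monitor) << '\n';  // prints 0, not "false"
    std::cout << std::to_string(true) << '\n';                   // prints 1
    return 0;
}
```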
ASSERT_FALSE(s.ok()); + + s = config.SetCacheConfigGpuCacheThreshold("a"); + ASSERT_FALSE(s.ok()); + s = config.SetCacheConfigGpuCacheThreshold("1.0"); + ASSERT_FALSE(s.ok()); +#endif + + s = config.SetCacheConfigCacheInsertData("N"); + ASSERT_FALSE(s.ok()); + + /* engine config */ + s = config.SetEngineConfigUseBlasThreshold("0xff"); + ASSERT_FALSE(s.ok()); + + s = config.SetEngineConfigOmpThreadNum("a"); + ASSERT_FALSE(s.ok()); + s = config.SetEngineConfigOmpThreadNum("10000"); + ASSERT_FALSE(s.ok()); + + s = config.SetEngineConfigGpuSearchThreshold("-1"); + ASSERT_FALSE(s.ok()); + + /* resource config */ + s = config.SetResourceConfigMode("default"); + ASSERT_FALSE(s.ok()); + + s = config.SetResourceConfigSearchResources("gpu10"); + ASSERT_FALSE(s.ok()); + + s = config.SetResourceConfigIndexBuildDevice("gup2"); + ASSERT_FALSE(s.ok()); + s = config.SetResourceConfigIndexBuildDevice("gpu16"); + ASSERT_FALSE(s.ok()); +} + TEST_F(ConfigTest, SERVER_CONFIG_TEST) { std::string config_path(CONFIG_PATH); milvus::server::Config& config = milvus::server::Config::GetInstance(); diff --git a/core/unittest/server/test_util.cpp b/core/unittest/server/test_util.cpp index 36d0ab8597..2634a4af9c 100644 --- a/core/unittest/server/test_util.cpp +++ b/core/unittest/server/test_util.cpp @@ -117,12 +117,11 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) { str = "a,b,c"; std::vector result; - auto status = milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result); - ASSERT_TRUE(status.ok()); + milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result); ASSERT_EQ(result.size(), 3UL); result.clear(); - status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result); + auto status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result); ASSERT_TRUE(status.ok()); ASSERT_EQ(result.size(), 3UL); From 7fa712e45c4d5d5ed3db5ac2ae01696d9958c256 Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Thu, 7 Nov 2019 16:11:55 +0800 Subject: [PATCH 141/196] Update DefineOptions.cmake --- core/cmake/DefineOptions.cmake | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index 4d5647b24d..6e05a12dd2 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -81,18 +81,6 @@ define_option(MILVUS_WITH_GRPC "Build with GRPC" ON) define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON) -#---------------------------------------------------------------------- -if (MSVC) - set_option_category("MSVC") - - define_option(MSVC_LINK_VERBOSE - "Pass verbose linking options when linking libraries and executables" - OFF) - - define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF) -endif () - - #---------------------------------------------------------------------- set_option_category("Test and benchmark") From dbe90c7abcb7dbe3e5284e2003b79fe796bfb714 Mon Sep 17 00:00:00 2001 From: groot Date: Thu, 7 Nov 2019 16:46:31 +0800 Subject: [PATCH 142/196] support table partition --- CHANGELOG.md | 1 + core/migration/README.md | 28 + core/migration/mysql_4_to_6.sql | 4 + core/migration/sqlite_4_to_6.sql | 4 + core/src/db/DB.h | 41 +- core/src/db/DBImpl.cpp | 484 ++- core/src/db/DBImpl.h | 56 +- core/src/db/Types.h | 5 +- core/src/db/meta/Meta.h | 44 +- core/src/db/meta/MetaTypes.h | 5 + core/src/db/meta/MySQLMetaImpl.cpp | 2698 ++++++------- core/src/db/meta/MySQLMetaImpl.h | 36 +- core/src/db/meta/SqliteMetaImpl.cpp | 1241 +++--- 
core/src/db/meta/SqliteMetaImpl.h | 32 +- core/src/grpc/gen-milvus/milvus.grpc.pb.cc | 730 ++-- core/src/grpc/gen-milvus/milvus.grpc.pb.h | 3533 ++++++++++------- core/src/grpc/gen-milvus/milvus.pb.cc | 1528 ++++++- core/src/grpc/gen-milvus/milvus.pb.h | 1054 ++++- core/src/grpc/milvus.proto | 274 +- core/src/scheduler/job/SearchJob.cpp | 16 +- core/src/scheduler/job/SearchJob.h | 22 +- core/src/scheduler/task/SearchTask.cpp | 115 +- core/src/scheduler/task/SearchTask.h | 5 +- core/src/sdk/examples/CMakeLists.txt | 4 +- .../examples/grpcsimple/src/ClientTest.cpp | 371 -- .../src/sdk/examples/partition/CMakeLists.txt | 34 + core/src/sdk/examples/partition/main.cpp | 79 + .../sdk/examples/partition/src/ClientTest.cpp | 205 + .../src/ClientTest.h | 0 .../{grpcsimple => simple}/CMakeLists.txt | 2 +- .../examples/{grpcsimple => simple}/main.cpp | 2 +- .../sdk/examples/simple/src/ClientTest.cpp | 209 + core/src/sdk/examples/simple/src/ClientTest.h | 26 + core/src/sdk/examples/utils/TimeRecorder.cpp | 35 + core/src/sdk/examples/utils/TimeRecorder.h | 36 + core/src/sdk/examples/utils/Utils.cpp | 223 ++ core/src/sdk/examples/utils/Utils.h | 77 + core/src/sdk/grpc/ClientProxy.cpp | 67 +- core/src/sdk/grpc/ClientProxy.h | 19 +- core/src/sdk/grpc/GrpcClient.cpp | 59 +- core/src/sdk/grpc/GrpcClient.h | 11 +- core/src/sdk/include/MilvusApi.h | 104 +- core/src/sdk/interface/ConnectionImpl.cpp | 33 +- core/src/sdk/interface/ConnectionImpl.h | 19 +- .../server/grpc_impl/GrpcRequestHandler.cpp | 36 +- .../src/server/grpc_impl/GrpcRequestHandler.h | 406 +- core/src/server/grpc_impl/GrpcRequestTask.cpp | 166 +- core/src/server/grpc_impl/GrpcRequestTask.h | 57 +- core/src/utils/StringHelpFunctions.cpp | 18 + core/src/utils/StringHelpFunctions.h | 5 + core/src/utils/ValidationUtil.cpp | 13 + core/src/utils/ValidationUtil.h | 4 + core/unittest/CMakeLists.txt | 1 + core/unittest/db/test_db.cpp | 160 +- core/unittest/db/test_db_mysql.cpp | 172 +- core/unittest/db/test_mem.cpp | 46 +- core/unittest/db/test_meta.cpp | 8 +- core/unittest/db/test_meta_mysql.cpp | 8 +- core/unittest/db/test_search.cpp | 396 +- core/unittest/metrics/test_metrics.cpp | 43 +- core/unittest/server/test_rpc.cpp | 52 +- core/unittest/server/test_util.cpp | 11 + 62 files changed, 9843 insertions(+), 5330 deletions(-) create mode 100644 core/migration/README.md create mode 100644 core/migration/mysql_4_to_6.sql create mode 100644 core/migration/sqlite_4_to_6.sql delete mode 100644 core/src/sdk/examples/grpcsimple/src/ClientTest.cpp create mode 100644 core/src/sdk/examples/partition/CMakeLists.txt create mode 100644 core/src/sdk/examples/partition/main.cpp create mode 100644 core/src/sdk/examples/partition/src/ClientTest.cpp rename core/src/sdk/examples/{grpcsimple => partition}/src/ClientTest.h (100%) rename core/src/sdk/examples/{grpcsimple => simple}/CMakeLists.txt (98%) rename core/src/sdk/examples/{grpcsimple => simple}/main.cpp (98%) create mode 100644 core/src/sdk/examples/simple/src/ClientTest.cpp create mode 100644 core/src/sdk/examples/simple/src/ClientTest.h create mode 100644 core/src/sdk/examples/utils/TimeRecorder.cpp create mode 100644 core/src/sdk/examples/utils/TimeRecorder.h create mode 100644 core/src/sdk/examples/utils/Utils.cpp create mode 100644 core/src/sdk/examples/utils/Utils.h diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d46ed6070..615e125bad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Please mark all change in change log and use the ticket from JIRA. 
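This commit models a partition as a hidden child table addressed by a (table, tag) pair, as the `DB` interface changes below spell out. A hypothetical usage sketch, with a stub in place of the real engine; none of this code is from the patch, but the semantics noted in the comments follow the new interface:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stub mirroring the shape of the new partition-aware DB calls.
struct StubDB {
    void CreatePartition(const std::string& table, const std::string& name, const std::string& tag) {
        std::cout << "create partition " << name << " (tag=" << tag << ") under " << table << '\n';
    }
    void InsertVectors(const std::string& table, const std::string& tag, std::size_t n) {
        // An empty tag targets the parent table itself.
        std::cout << "insert " << n << " vectors into " << table << " tag=" << tag << '\n';
    }
    void Query(const std::string& table, const std::vector<std::string>& tags) {
        // An empty tag list searches the parent table plus all its partitions.
        std::cout << "query " << table << " with " << tags.size() << " tag filter(s)\n";
    }
};

int main() {
    StubDB db;
    db.CreatePartition("tbl", "tbl__2019_11", "2019-11");
    db.InsertVectors("tbl", "2019-11", 1000);
    db.Query("tbl", {"2019-.*"});
    return 0;
}
```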
## Feature - \#12 - Pure CPU version for Milvus +- \#77 - Support table partition ## Improvement diff --git a/core/migration/README.md b/core/migration/README.md new file mode 100644 index 0000000000..7c318c1393 --- /dev/null +++ b/core/migration/README.md @@ -0,0 +1,28 @@ +## Data Migration + +#### 0.3.x +Legacy data cannot be migrated to later versions. + +#### 0.4.x +Legacy data can be reused directly by 0.5.x. + +Legacy data can be migrated to 0.6.x. + +#### 0.5.x +Legacy data can be migrated to 0.6.x. + +#### 0.6.x +How to migrate legacy 0.4.x/0.5.x data: + +For SQLite meta: +```shell + $ sqlite3 [path_to]/meta.sqlite < sqlite_4_to_6.sql +``` + +For MySQL meta: +```shell + $ mysql -h127.0.0.1 -uroot -p123456 -Dmilvus < mysql_4_to_6.sql +``` + + + diff --git a/core/migration/mysql_4_to_6.sql b/core/migration/mysql_4_to_6.sql new file mode 100644 index 0000000000..f8a5b1b70b --- /dev/null +++ b/core/migration/mysql_4_to_6.sql @@ -0,0 +1,4 @@ +alter table Tables add column owner_table VARCHAR(255) DEFAULT '' NOT NULL; +alter table Tables add column partition_tag VARCHAR(255) DEFAULT '' NOT NULL; +alter table Tables add column version VARCHAR(64) DEFAULT '0.6.0' NOT NULL; +update Tables set version='0.6.0'; diff --git a/core/migration/sqlite_4_to_6.sql b/core/migration/sqlite_4_to_6.sql new file mode 100644 index 0000000000..2069145046 --- /dev/null +++ b/core/migration/sqlite_4_to_6.sql @@ -0,0 +1,4 @@ +alter table Tables add column 'owner_table' TEXT DEFAULT '' NOT NULL; +alter table Tables add column 'partition_tag' TEXT DEFAULT '' NOT NULL; +alter table Tables add column 'version' TEXT DEFAULT '0.6.0' NOT NULL; +update Tables set version='0.6.0'; diff --git a/core/src/db/DB.h b/core/src/db/DB.h index a790fadb50..09bbd4af45 100644 --- a/core/src/db/DB.h +++ b/core/src/db/DB.h @@ -47,43 +47,68 @@ class DB { virtual Status CreateTable(meta::TableSchema& table_schema_) = 0; + virtual Status - DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0; + DropTable(const std::string& table_id, const meta::DatesT& dates) = 0; + virtual Status DescribeTable(meta::TableSchema& table_schema_) = 0; + virtual Status HasTable(const std::string& table_id, bool& has_or_not_) = 0; + virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0; + virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0; + virtual Status PreloadTable(const std::string& table_id) = 0; + virtual Status UpdateTableFlag(const std::string& table_id, int64_t flag) = 0; virtual Status - InsertVectors(const std::string& table_id_, uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0; + CreatePartition(const std::string& table_id, const std::string& partition_name, + const std::string& partition_tag) = 0; virtual Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - QueryResults& results) = 0; + DropPartition(const std::string& partition_name) = 0; virtual Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, QueryResults& results) = 0; + DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0; virtual Status - Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0; + ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) = 0;
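Note that search tags are not matched literally: `DBImpl::GetPartitionsByTags` (in the implementation below) treats each supplied tag as a pattern via `StringHelpFunctions::IsRegexMatch`. A sketch of that resolution step, assuming plain `std::regex_match` semantics for the tag pattern:

```cpp
#include <iostream>
#include <regex>
#include <set>
#include <string>
#include <vector>

struct PartitionSchema {
    std::string table_id_;       // internal name of the partition table
    std::string partition_tag_;  // user-visible tag
};

// Collect the partition tables whose tag matches any requested pattern,
// mirroring GetPartitionsByTags (regex semantics assumed).
std::set<std::string> ResolvePartitions(const std::vector<PartitionSchema>& partitions,
                                        const std::vector<std::string>& tags) {
    std::set<std::string> names;
    for (const auto& tag : tags) {
        for (const auto& schema : partitions) {
            if (std::regex_match(schema.partition_tag_, std::regex(tag))) {
                names.insert(schema.table_id_);
            }
        }
    }
    return names;
}

int main() {
    std::vector<PartitionSchema> partitions = {{"tbl__p1", "2019-10"}, {"tbl__p2", "2019-11"}};
    for (const auto& name : ResolvePartitions(partitions, {"2019-.*"})) {
        std::cout << name << '\n';  // both partitions match the pattern
    }
    return 0;
}
```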
+ + virtual Status + InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors, + IDNumbers& vector_ids_) = 0; + + virtual Status + Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0; + + virtual Status + Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) = 0; + + virtual Status + QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) = 0; virtual Status Size(uint64_t& result) = 0; virtual Status CreateIndex(const std::string& table_id, const TableIndex& index) = 0; + virtual Status DescribeIndex(const std::string& table_id, TableIndex& index) = 0; + virtual Status DropIndex(const std::string& table_id) = 0; diff --git a/core/src/db/DBImpl.cpp b/core/src/db/DBImpl.cpp index 6995de3d14..3e0501b84e 100644 --- a/core/src/db/DBImpl.cpp +++ b/core/src/db/DBImpl.cpp @@ -30,6 +30,7 @@ #include "scheduler/job/DeleteJob.h" #include "scheduler/job/SearchJob.h" #include "utils/Log.h" +#include "utils/StringHelpFunctions.h" #include "utils/TimeRecorder.h" #include @@ -38,6 +39,7 @@ #include #include #include +#include #include namespace milvus { @@ -49,6 +51,17 @@ constexpr uint64_t METRIC_ACTION_INTERVAL = 1; constexpr uint64_t COMPACT_ACTION_INTERVAL = 1; constexpr uint64_t INDEX_ACTION_INTERVAL = 1; +static const Status SHUTDOWN_ERROR = Status(DB_ERROR, "Milvus server is shut down!"); + +void +TraverseFiles(const meta::DatePartionedTableFilesSchema& date_files, meta::TableFilesSchema& files_array) { + for (auto& day_files : date_files) { + for (auto& file : day_files.second) { + files_array.push_back(file); + } + } +} + } // namespace DBImpl::DBImpl(const DBOptions& options) @@ -113,7 +126,7 @@ DBImpl::DropAll() { Status DBImpl::CreateTable(meta::TableSchema& table_schema) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } meta::TableSchema temp_schema = table_schema; @@ -122,34 +135,18 @@ DBImpl::CreateTable(meta::TableSchema& table_schema) { } Status -DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) { +DBImpl::DropTable(const std::string& table_id, const meta::DatesT& dates) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } - // dates partly delete files of the table but currently we don't support - ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id; - - if (dates.empty()) { - mem_mgr_->EraseMemVector(table_id); // not allow insert - meta_ptr_->DeleteTable(table_id); // soft delete table - - // scheduler will determine when to delete table files - auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource(); - scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres); - scheduler::JobMgrInst::GetInstance()->Put(job); - job->WaitAndDelete(); - } else { - meta_ptr_->DropPartitionsByDates(table_id, dates); - } - - return Status::OK(); + return DropTableRecursively(table_id, dates); } Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
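From here on the implementation repeats one pattern: perform the operation on the parent table, then `ShowPartitions` and recurse into each partition table (`DropTableRecursively`, `UpdateTableIndexRecursively`, `BuildTableIndexRecursively`, and `GetTableRowCountRecursively` all follow it). A minimal sketch of that traversal, with a hypothetical partition map in place of the metadata store:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for meta_ptr_->ShowPartitions().
std::map<std::string, std::vector<std::string>> g_partitions = {{"tbl", {"tbl__p0", "tbl__p1"}}};

// Apply op to the table itself, then to every partition beneath it.
void ApplyRecursively(const std::string& table_id,
                      const std::function<void(const std::string&)>& op) {
    op(table_id);
    for (const auto& partition : g_partitions[table_id]) {
        ApplyRecursively(partition, op);
    }
}

int main() {
    ApplyRecursively("tbl", [](const std::string& id) { std::cout << "operate on " << id << '\n'; });
    return 0;
}
```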
if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } auto stat = meta_ptr_->DescribeTable(table_schema); @@ -160,7 +157,7 @@ DBImpl::DescribeTable(meta::TableSchema& table_schema) { Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->HasTable(table_id, has_or_not); @@ -169,7 +166,7 @@ DBImpl::HasTable(const std::string& table_id, bool& has_or_not) { Status DBImpl::AllTables(std::vector& table_schema_array) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->AllTables(table_schema_array); @@ -178,55 +175,59 @@ DBImpl::AllTables(std::vector& table_schema_array) { Status DBImpl::PreloadTable(const std::string& table_id) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } - meta::DatePartionedTableFilesSchema files; - - meta::DatesT dates; + // get all table files from parent table std::vector ids; - auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files); + meta::TableFilesSchema files_array; + auto status = GetFilesToSearch(table_id, ids, files_array); if (!status.ok()) { return status; } + // get files from partition tables + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = GetFilesToSearch(schema.table_id_, ids, files_array); + } + int64_t size = 0; int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity(); int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage(); int64_t available_size = cache_total - cache_usage; - for (auto& day_files : files) { - for (auto& file : day_files.second) { - ExecutionEnginePtr engine = - EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_, - (MetricType)file.metric_type_, file.nlist_); - if (engine == nullptr) { - ENGINE_LOG_ERROR << "Invalid engine type"; - return Status(DB_ERROR, "Invalid engine type"); - } + for (auto& file : files_array) { + ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_, + (MetricType)file.metric_type_, file.nlist_); + if (engine == nullptr) { + ENGINE_LOG_ERROR << "Invalid engine type"; + return Status(DB_ERROR, "Invalid engine type"); + } - size += engine->PhysicalSize(); - if (size > available_size) { - return Status(SERVER_CACHE_FULL, "Cache is full"); - } else { - try { - // step 1: load index - engine->Load(true); - } catch (std::exception& ex) { - std::string msg = "Pre-load table encounter exception: " + std::string(ex.what()); - ENGINE_LOG_ERROR << msg; - return Status(DB_ERROR, msg); - } + size += engine->PhysicalSize(); + if (size > available_size) { + return Status(SERVER_CACHE_FULL, "Cache is full"); + } else { + try { + // step 1: load index + engine->Load(true); + } catch (std::exception& ex) { + std::string msg = "Pre-load table encounter exception: " + std::string(ex.what()); + ENGINE_LOG_ERROR << msg; + return Status(DB_ERROR, msg); } } } + return Status::OK(); } Status DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is 
shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->UpdateTableFlag(table_id, flag); @@ -235,34 +236,96 @@ DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } - return meta_ptr_->Count(table_id, row_count); + return GetTableRowCountRecursively(table_id, row_count); } Status -DBImpl::InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) { - // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache"; +DBImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, + const std::string& partition_tag) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } + return meta_ptr_->CreatePartition(table_id, partition_name, partition_tag); +} + +Status +DBImpl::DropPartition(const std::string& partition_name) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + auto status = mem_mgr_->EraseMemVector(partition_name); // not allow insert + status = meta_ptr_->DropPartition(partition_name); // soft delete table + + // scheduler will determine when to delete table files + auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource(); + scheduler::DeleteJobPtr job = std::make_shared(partition_name, meta_ptr_, nres); + scheduler::JobMgrInst::GetInstance()->Put(job); + job->WaitAndDelete(); + + return Status::OK(); +} + +Status +DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + std::string partition_name; + auto status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name); + return DropPartition(partition_name); +} + +Status +DBImpl::ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + return meta_ptr_->ShowPartitions(table_id, partiton_schema_array); +} + +Status +DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors, + IDNumbers& vector_ids) { + // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache"; + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + // if partition is specified, use partition as target table Status status; + std::string target_table_name = table_id; + if (!partition_tag.empty()) { + std::string partition_name; + status = meta_ptr_->GetPartitionName(table_id, partition_tag, target_table_name); + } + + // insert vectors into target table milvus::server::CollectInsertMetrics metrics(n, status); - status = mem_mgr_->InsertVectors(table_id, n, vectors, vector_ids); + status = mem_mgr_->InsertVectors(target_table_name, n, vectors, vector_ids); return status; } Status DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + + Status status; { std::unique_lock lock(build_index_mutex_); // step 1: check index difference TableIndex old_index; - auto status = DescribeIndex(table_id, old_index); + status = DescribeIndex(table_id, old_index); if (!status.ok()) { ENGINE_LOG_ERROR << 
"Failed to get table index info for table: " << table_id; return status; @@ -272,11 +335,8 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) { TableIndex new_index = index; new_index.metric_type_ = old_index.metric_type_; // dont change metric type, it was defined by CreateTable if (!utils::IsSameIndex(old_index, new_index)) { - DropIndex(table_id); - - status = meta_ptr_->UpdateTableIndex(table_id, new_index); + status = UpdateTableIndexRecursively(table_id, new_index); if (!status.ok()) { - ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id; return status; } } @@ -287,101 +347,91 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) { WaitMergeFileFinish(); // step 4: wait and build index - // for IDMAP type, only wait all NEW file converted to RAW file - // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files - std::vector file_types; - if (index.engine_type_ == static_cast(EngineType::FAISS_IDMAP)) { - file_types = { - static_cast(meta::TableFileSchema::NEW), - static_cast(meta::TableFileSchema::NEW_MERGE), - }; - } else { - file_types = { - static_cast(meta::TableFileSchema::RAW), - static_cast(meta::TableFileSchema::NEW), - static_cast(meta::TableFileSchema::NEW_MERGE), - static_cast(meta::TableFileSchema::NEW_INDEX), - static_cast(meta::TableFileSchema::TO_INDEX), - }; - } + status = BuildTableIndexRecursively(table_id, index); - std::vector file_ids; - auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids); - int times = 1; - - while (!file_ids.empty()) { - ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times; - if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) { - status = meta_ptr_->UpdateTableFilesToIndex(table_id); - } - - std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100))); - status = meta_ptr_->FilesByType(table_id, file_types, file_ids); - times++; - } - - return Status::OK(); + return status; } Status DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + return meta_ptr_->DescribeTableIndex(table_id, index); } Status DBImpl::DropIndex(const std::string& table_id) { + if (shutting_down_.load(std::memory_order_acquire)) { + return SHUTDOWN_ERROR; + } + ENGINE_LOG_DEBUG << "Drop index for table: " << table_id; - return meta_ptr_->DropTableIndex(table_id); + return DropTableIndexRecursively(table_id); } Status -DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - QueryResults& results) { +DBImpl::Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } meta::DatesT dates = {utils::GetDate()}; - Status result = Query(table_id, k, nq, nprobe, vectors, dates, results); - + Status result = Query(table_id, partition_tags, k, nq, nprobe, vectors, dates, result_ids, result_distances); return result; } Status -DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, QueryResults& results) { +DBImpl::Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, 
uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id << " date range count: " << dates.size(); - // get all table files from table - meta::DatePartionedTableFilesSchema files; + Status status; std::vector ids; - auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files); - if (!status.ok()) { - return status; - } + meta::TableFilesSchema files_array; - meta::TableFilesSchema file_id_array; - for (auto& day_files : files) { - for (auto& file : day_files.second) { - file_id_array.push_back(file); + if (partition_tags.empty()) { + // no partition tag specified, means search in whole table + // get all table files from parent table + status = GetFilesToSearch(table_id, ids, files_array); + if (!status.ok()) { + return status; + } + + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = GetFilesToSearch(schema.table_id_, ids, files_array); + } + } else { + // get files from specified partitions + std::set partition_name_array; + GetPartitionsByTags(table_id, partition_tags, partition_name_array); + + for (auto& partition_name : partition_name_array) { + status = GetFilesToSearch(partition_name, ids, files_array); } } cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query - status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results); + status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances); cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query return status; } Status -DBImpl::Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) { +DBImpl::QueryByFileID(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id << " date range count: " << dates.size(); @@ -395,25 +445,18 @@ DBImpl::Query(const std::string& table_id, const std::vector& file_ ids.push_back(std::stoul(id, &sz)); } - meta::DatePartionedTableFilesSchema files_array; - auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files_array); + meta::TableFilesSchema files_array; + auto status = GetFilesToSearch(table_id, ids, files_array); if (!status.ok()) { return status; } - meta::TableFilesSchema file_id_array; - for (auto& day_files : files_array) { - for (auto& file : day_files.second) { - file_id_array.push_back(file); - } - } - - if (file_id_array.empty()) { + if (files_array.empty()) { return Status(DB_ERROR, "Invalid file id"); } cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query - status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results); + status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances); cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after 
query return status; } @@ -421,7 +464,7 @@ DBImpl::Query(const std::string& table_id, const std::vector& file_ Status DBImpl::Size(uint64_t& result) { if (shutting_down_.load(std::memory_order_acquire)) { - return Status(DB_ERROR, "Milsvus server is shutdown!"); + return SHUTDOWN_ERROR; } return meta_ptr_->Size(result); @@ -432,7 +475,7 @@ DBImpl::Size(uint64_t& result) { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Status DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, QueryResults& results) { + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) { server::CollectQueryMetrics metrics(nq); TimeRecorder rc(""); @@ -453,7 +496,8 @@ DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& fi } // step 3: construct results - results = job->GetResult(); + result_ids = job->GetResultIds(); + result_distances = job->GetResultDistances(); rc.ElapseFromBegin("Engine query totally cost"); return Status::OK(); @@ -772,5 +816,183 @@ DBImpl::BackgroundBuildIndex() { ENGINE_LOG_TRACE << "Background build index thread exit"; } +Status +DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector& file_ids, + meta::TableFilesSchema& files) { + meta::DatesT dates; + meta::DatePartionedTableFilesSchema date_files; + auto status = meta_ptr_->FilesToSearch(table_id, file_ids, dates, date_files); + if (!status.ok()) { + return status; + } + + TraverseFiles(date_files, files); + return Status::OK(); +} + +Status +DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector& partition_tags, + std::set& partition_name_array) { + std::vector partiton_array; + auto status = meta_ptr_->ShowPartitions(table_id, partiton_array); + + for (auto& tag : partition_tags) { + for (auto& schema : partiton_array) { + if (server::StringHelpFunctions::IsRegexMatch(schema.partition_tag_, tag)) { + partition_name_array.insert(schema.table_id_); + } + } + } + + return Status::OK(); +} + +Status +DBImpl::DropTableRecursively(const std::string& table_id, const meta::DatesT& dates) { + // dates partly delete files of the table but currently we don't support + ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id; + + Status status; + if (dates.empty()) { + status = mem_mgr_->EraseMemVector(table_id); // not allow insert + status = meta_ptr_->DropTable(table_id); // soft delete table + + // scheduler will determine when to delete table files + auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource(); + scheduler::DeleteJobPtr job = std::make_shared(table_id, meta_ptr_, nres); + scheduler::JobMgrInst::GetInstance()->Put(job); + job->WaitAndDelete(); + } else { + status = meta_ptr_->DropDataByDate(table_id, dates); + } + + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = DropTableRecursively(schema.table_id_, dates); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index) { + DropIndex(table_id); + + auto status = meta_ptr_->UpdateTableIndex(table_id, index); + if (!status.ok()) { + ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id; + return status; + } + + std::vector partiton_array; + status = 
meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = UpdateTableIndexRecursively(schema.table_id_, index); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index) { + // for IDMAP type, only wait all NEW file converted to RAW file + // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files + std::vector file_types; + if (index.engine_type_ == static_cast(EngineType::FAISS_IDMAP)) { + file_types = { + static_cast(meta::TableFileSchema::NEW), + static_cast(meta::TableFileSchema::NEW_MERGE), + }; + } else { + file_types = { + static_cast(meta::TableFileSchema::RAW), + static_cast(meta::TableFileSchema::NEW), + static_cast(meta::TableFileSchema::NEW_MERGE), + static_cast(meta::TableFileSchema::NEW_INDEX), + static_cast(meta::TableFileSchema::TO_INDEX), + }; + } + + // get files to build index + std::vector file_ids; + auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids); + int times = 1; + + while (!file_ids.empty()) { + ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times; + if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) { + status = meta_ptr_->UpdateTableFilesToIndex(table_id); + } + + std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100))); + status = meta_ptr_->FilesByType(table_id, file_types, file_ids); + times++; + } + + // build index for partition + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = BuildTableIndexRecursively(schema.table_id_, index); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::DropTableIndexRecursively(const std::string& table_id) { + ENGINE_LOG_DEBUG << "Drop index for table: " << table_id; + auto status = meta_ptr_->DropTableIndex(table_id); + if (!status.ok()) { + return status; + } + + // drop partition index + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + status = DropTableIndexRecursively(schema.table_id_); + if (!status.ok()) { + return status; + } + } + + return Status::OK(); +} + +Status +DBImpl::GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count) { + row_count = 0; + auto status = meta_ptr_->Count(table_id, row_count); + if (!status.ok()) { + return status; + } + + // get partition row count + std::vector partiton_array; + status = meta_ptr_->ShowPartitions(table_id, partiton_array); + for (auto& schema : partiton_array) { + uint64_t partition_row_count = 0; + status = GetTableRowCountRecursively(schema.table_id_, partition_row_count); + if (!status.ok()) { + return status; + } + + row_count += partition_row_count; + } + + return Status::OK(); +} + } // namespace engine } // namespace milvus diff --git a/core/src/db/DBImpl.h b/core/src/db/DBImpl.h index e1e030cc32..932fc990e4 100644 --- a/core/src/db/DBImpl.h +++ b/core/src/db/DBImpl.h @@ -57,7 +57,7 @@ class DBImpl : public DB { CreateTable(meta::TableSchema& table_schema) override; Status - DeleteTable(const std::string& table_id, const meta::DatesT& dates) override; + DropTable(const std::string& table_id, const meta::DatesT& dates) override; Status DescribeTable(meta::TableSchema& table_schema) override; @@ -78,7 +78,21 @@ class DBImpl : public DB { GetTableRowCount(const 
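BuildTableIndexRecursively above waits for outstanding files with a sleep that grows by 100 ms per round and is capped at 10 s, so it saturates from round 100 onward. A minimal sketch of that wait strategy in isolation; "pending" is a hypothetical callback standing in for the FilesByType() check:

    #include <algorithm>
    #include <chrono>
    #include <functional>
    #include <thread>

    // Poll until no work remains, sleeping a little longer each round but
    // never more than 10 seconds, mirroring BuildTableIndexRecursively.
    void
    WaitUntilDone(const std::function<bool()>& pending) {
        int times = 1;
        while (pending()) {
            std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
            ++times;
        }
    }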
std::string& table_id, uint64_t& row_count) override; Status - InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) override; + CreatePartition(const std::string& table_id, const std::string& partition_name, + const std::string& partition_tag) override; + + Status + DropPartition(const std::string& partition_name) override; + + Status + DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override; + + Status + ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) override; + + Status + InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors, + IDNumbers& vector_ids) override; Status CreateIndex(const std::string& table_id, const TableIndex& index) override; @@ -90,16 +104,18 @@ class DBImpl : public DB { DropIndex(const std::string& table_id) override; Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - QueryResults& results) override; + Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) override; Status - Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors, - const meta::DatesT& dates, QueryResults& results) override; + Query(const std::string& table_id, const std::vector& partition_tags, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) override; Status - Query(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) override; + QueryByFileID(const std::string& table_id, const std::vector& file_ids, uint64_t k, uint64_t nq, + uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids, + ResultDistances& result_distances) override; Status Size(uint64_t& result) override; @@ -107,7 +123,7 @@ class DBImpl : public DB { private: Status QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq, - uint64_t nprobe, const float* vectors, QueryResults& results); + uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances); void BackgroundTimerTask(); @@ -136,6 +152,28 @@ class DBImpl : public DB { Status MemSerialize(); + Status + GetFilesToSearch(const std::string& table_id, const std::vector& file_ids, meta::TableFilesSchema& files); + + Status + GetPartitionsByTags(const std::string& table_id, const std::vector& partition_tags, + std::set& partition_name_array); + + Status + DropTableRecursively(const std::string& table_id, const meta::DatesT& dates); + + Status + UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index); + + Status + BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index); + + Status + DropTableIndexRecursively(const std::string& table_id); + + Status + GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count); + private: const DBOptions options_; diff --git a/core/src/db/Types.h b/core/src/db/Types.h index 94528a9a8a..76c06126f8 100644 --- a/core/src/db/Types.h +++ b/core/src/db/Types.h @@ -19,6 +19,7 @@ #include "db/engine/ExecutionEngine.h" +#include #include #include #include @@ -30,8 
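The header changes above add partition CRUD and tag-aware insert/query entry points. A hypothetical driver against the new interface is sketched below; the include path, namespace layout, table name, tag, dimension and payload values are all assumptions, and Status returns are ignored for brevity:

    #include <string>
    #include <vector>

    #include "db/DB.h"  // assumed location of the DB interface

    void
    PartitionDemo(milvus::engine::DB& db) {
        std::vector<float> vectors(16 * 256);  // 16 vectors of dimension 256, zero-filled
        milvus::engine::IDNumbers ids;

        db.CreatePartition("demo_table", "", "2019-09");  // empty name: meta generates one
        db.InsertVectors("demo_table", "2019-09", 16, vectors.data(), ids);

        std::vector<std::string> tags = {"2019-.*"};  // tags are matched as regular expressions
        milvus::engine::ResultIds result_ids;
        milvus::engine::ResultDistances result_distances;
        db.Query("demo_table", tags, /*k=*/10, /*nq=*/16, /*nprobe=*/16, vectors.data(), result_ids,
                 result_distances);
    }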
+31,8 @@ typedef int64_t IDNumber; typedef IDNumber* IDNumberPtr; typedef std::vector IDNumbers; -typedef std::vector> QueryResult; -typedef std::vector QueryResults; +typedef std::vector ResultIds; +typedef std::vector ResultDistances; struct TableIndex { int32_t engine_type_ = (int)EngineType::FAISS_IDMAP; diff --git a/core/src/db/meta/Meta.h b/core/src/db/meta/Meta.h index ec4b66916d..f538bebce6 100644 --- a/core/src/db/meta/Meta.h +++ b/core/src/db/meta/Meta.h @@ -50,14 +50,11 @@ class Meta { virtual Status AllTables(std::vector& table_schema_array) = 0; - virtual Status - UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0; - virtual Status UpdateTableFlag(const std::string& table_id, int64_t flag) = 0; virtual Status - DeleteTable(const std::string& table_id) = 0; + DropTable(const std::string& table_id) = 0; virtual Status DeleteTableFiles(const std::string& table_id) = 0; @@ -66,20 +63,41 @@ class Meta { CreateTableFile(TableFileSchema& file_schema) = 0; virtual Status - DropPartitionsByDates(const std::string& table_id, const DatesT& dates) = 0; + DropDataByDate(const std::string& table_id, const DatesT& dates) = 0; virtual Status GetTableFiles(const std::string& table_id, const std::vector& ids, TableFilesSchema& table_files) = 0; - virtual Status - UpdateTableFilesToIndex(const std::string& table_id) = 0; - virtual Status UpdateTableFile(TableFileSchema& file_schema) = 0; virtual Status UpdateTableFiles(TableFilesSchema& files) = 0; + virtual Status + UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0; + + virtual Status + UpdateTableFilesToIndex(const std::string& table_id) = 0; + + virtual Status + DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0; + + virtual Status + DropTableIndex(const std::string& table_id) = 0; + + virtual Status + CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag) = 0; + + virtual Status + DropPartition(const std::string& partition_name) = 0; + + virtual Status + ShowPartitions(const std::string& table_name, std::vector& partiton_schema_array) = 0; + + virtual Status + GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0; + virtual Status FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, DatePartionedTableFilesSchema& files) = 0; @@ -87,12 +105,6 @@ class Meta { virtual Status FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) = 0; - virtual Status - Size(uint64_t& result) = 0; - - virtual Status - Archive() = 0; - virtual Status FilesToIndex(TableFilesSchema&) = 0; @@ -101,10 +113,10 @@ class Meta { std::vector& file_ids) = 0; virtual Status - DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0; + Size(uint64_t& result) = 0; virtual Status - DropTableIndex(const std::string& table_id) = 0; + Archive() = 0; virtual Status CleanUp() = 0; diff --git a/core/src/db/meta/MetaTypes.h b/core/src/db/meta/MetaTypes.h index c973f3fdea..28f35e76fc 100644 --- a/core/src/db/meta/MetaTypes.h +++ b/core/src/db/meta/MetaTypes.h @@ -19,6 +19,7 @@ #include "db/Constants.h" #include "db/engine/ExecutionEngine.h" +#include "src/config.h" #include #include @@ -33,6 +34,7 @@ constexpr int32_t DEFAULT_ENGINE_TYPE = (int)EngineType::FAISS_IDMAP; constexpr int32_t DEFAULT_NLIST = 16384; constexpr int32_t DEFAULT_METRIC_TYPE = (int)MetricType::L2; constexpr int32_t DEFAULT_INDEX_FILE_SIZE = ONE_GB; +constexpr char 
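The new Meta virtuals model a partition as an ordinary table whose schema carries an owner_table and a partition_tag. A toy in-memory registry that mimics the CreatePartition / GetPartitionName / ShowPartitions contract, purely as an illustration of the data model rather than the MySQL-backed code:

    #include <string>
    #include <vector>

    struct Schema {
        std::string table_id_;
        std::string owner_table_;
        std::string partition_tag_;
    };

    class ToyMeta {
     public:
        bool
        CreatePartition(const std::string& owner, const std::string& name, const std::string& tag) {
            if (!GetPartitionName(owner, tag).empty()) {
                return false;  // a tag may exist only once under one owner
            }
            tables_.push_back({name, owner, tag});
            return true;
        }

        // Resolve a tag to the internal table id; empty string means not found.
        std::string
        GetPartitionName(const std::string& owner, const std::string& tag) const {
            for (auto& t : tables_) {
                if (t.owner_table_ == owner && t.partition_tag_ == tag) {
                    return t.table_id_;
                }
            }
            return "";
        }

        std::vector<Schema>
        ShowPartitions(const std::string& owner) const {
            std::vector<Schema> result;
            for (auto& t : tables_) {
                if (t.owner_table_ == owner) {
                    result.push_back(t);
                }
            }
            return result;
        }

     private:
        std::vector<Schema> tables_;
    };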
CURRENT_VERSION[] = MILVUS_VERSION; constexpr int64_t FLAG_MASK_NO_USERID = 0x1; constexpr int64_t FLAG_MASK_HAS_USERID = 0x1 << 1; @@ -57,6 +59,9 @@ struct TableSchema { int32_t engine_type_ = DEFAULT_ENGINE_TYPE; int32_t nlist_ = DEFAULT_NLIST; int32_t metric_type_ = DEFAULT_METRIC_TYPE; + std::string owner_table_; + std::string partition_tag_; + std::string version_ = CURRENT_VERSION; }; // TableSchema struct TableFileSchema { diff --git a/core/src/db/meta/MySQLMetaImpl.cpp b/core/src/db/meta/MySQLMetaImpl.cpp index c7a054524c..ff36554c10 100644 --- a/core/src/db/meta/MySQLMetaImpl.cpp +++ b/core/src/db/meta/MySQLMetaImpl.cpp @@ -145,6 +145,10 @@ static const MetaSchema TABLES_SCHEMA(META_TABLES, { MetaField("engine_type", "INT", "DEFAULT 1 NOT NULL"), MetaField("nlist", "INT", "DEFAULT 16384 NOT NULL"), MetaField("metric_type", "INT", "DEFAULT 1 NOT NULL"), + MetaField("owner_table", "VARCHAR(255)", "NOT NULL"), + MetaField("partition_tag", "VARCHAR(255)", "NOT NULL"), + MetaField("version", "VARCHAR(64)", + std::string("DEFAULT '") + CURRENT_VERSION + "'"), }); // TableFiles schema @@ -294,7 +298,7 @@ MySQLMetaImpl::Initialize() { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } if (!connectionPtr->thread_aware()) { @@ -328,9 +332,350 @@ MySQLMetaImpl::Initialize() { return Status::OK(); } +Status +MySQLMetaImpl::CreateTable(TableSchema& table_schema) { + try { + server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query createTableQuery = connectionPtr->query(); + + if (table_schema.table_id_.empty()) { + NextTableId(table_schema.table_id_); + } else { + createTableQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote + << table_schema.table_id_ << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); + + mysqlpp::StoreQueryResult res = createTableQuery.store(); + + if (res.num_rows() == 1) { + int state = res[0]["state"]; + if (TableSchema::TO_DELETE == state) { + return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); + } else { + return Status(DB_ALREADY_EXIST, "Table already exists"); + } + } + } + + table_schema.id_ = -1; + table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + + std::string id = "NULL"; // auto-increment + std::string& table_id = table_schema.table_id_; + std::string state = std::to_string(table_schema.state_); + std::string dimension = std::to_string(table_schema.dimension_); + std::string created_on = std::to_string(table_schema.created_on_); + std::string flag = std::to_string(table_schema.flag_); + std::string index_file_size = std::to_string(table_schema.index_file_size_); + std::string engine_type = std::to_string(table_schema.engine_type_); + std::string nlist = std::to_string(table_schema.nlist_); + std::string metric_type = std::to_string(table_schema.metric_type_); + std::string& owner_table = table_schema.owner_table_; + std::string& partition_tag = table_schema.partition_tag_; + std::string& version = table_schema.version_; + + createTableQuery << "INSERT INTO " << META_TABLES << " VALUES(" << id << ", " << mysqlpp::quote << table_id + << ", " << state << ", " 
<< dimension << ", " << created_on << ", " << flag << ", " + << index_file_size << ", " << engine_type << ", " << nlist << ", " << metric_type << ", " + << mysqlpp::quote << owner_table << ", " << mysqlpp::quote << partition_tag << ", " + << mysqlpp::quote << version << ");"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); + + if (mysqlpp::SimpleResult res = createTableQuery.execute()) { + table_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? + + // Consume all results to avoid "Commands out of sync" error + } else { + return HandleException("Add Table Error", createTableQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; + return utils::CreateTablePath(options_, table_schema.table_id_); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CREATING TABLE", e.what()); + } +} + +Status +MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query describeTableQuery = connectionPtr->query(); + describeTableQuery + << "SELECT id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type" + << " ,owner_table, partition_tag, version" + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTable: " << describeTableQuery.str(); + + res = describeTableQuery.store(); + } // Scoped Connection + + if (res.num_rows() == 1) { + const mysqlpp::Row& resRow = res[0]; + table_schema.id_ = resRow["id"]; // implicit conversion + table_schema.state_ = resRow["state"]; + table_schema.dimension_ = resRow["dimension"]; + table_schema.created_on_ = resRow["created_on"]; + table_schema.flag_ = resRow["flag"]; + table_schema.index_file_size_ = resRow["index_file_size"]; + table_schema.engine_type_ = resRow["engine_type"]; + table_schema.nlist_ = resRow["nlist"]; + table_schema.metric_type_ = resRow["metric_type"]; + resRow["owner_table"].to_string(table_schema.owner_table_); + resRow["partition_tag"].to_string(table_schema.partition_tag_); + resRow["version"].to_string(table_schema.version_); + } else { + return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DESCRIBING TABLE", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query hasTableQuery = connectionPtr->query(); + // since table_id is a unique column we just need to check whether it exists or not + hasTableQuery << "SELECT EXISTS" + << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ")" + << " AS " << mysqlpp::quote << "check" + << ";"; + + ENGINE_LOG_DEBUG << 
"MySQLMetaImpl::HasTable: " << hasTableQuery.str(); + + res = hasTableQuery.store(); + } // Scoped Connection + + int check = res[0]["check"]; + has_or_not = (check == 1); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CHECKING IF TABLE EXISTS", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::AllTables(std::vector& table_schema_array) { + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query allTablesQuery = connectionPtr->query(); + allTablesQuery << "SELECT id, table_id, dimension, engine_type, nlist, index_file_size, metric_type" + << " ,owner_table, partition_tag, version" + << " FROM " << META_TABLES << " WHERE state <> " << std::to_string(TableSchema::TO_DELETE) + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allTablesQuery.str(); + + res = allTablesQuery.store(); + } // Scoped Connection + + for (auto& resRow : res) { + TableSchema table_schema; + table_schema.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_schema.table_id_); + table_schema.dimension_ = resRow["dimension"]; + table_schema.index_file_size_ = resRow["index_file_size"]; + table_schema.engine_type_ = resRow["engine_type"]; + table_schema.nlist_ = resRow["nlist"]; + table_schema.metric_type_ = resRow["metric_type"]; + resRow["owner_table"].to_string(table_schema.owner_table_); + resRow["partition_tag"].to_string(table_schema.partition_tag_); + resRow["version"].to_string(table_schema.version_); + + table_schema_array.emplace_back(table_schema); + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DESCRIBING ALL TABLES", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DropTable(const std::string& table_id) { + try { + server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + // soft delete table + mysqlpp::Query deleteTableQuery = connectionPtr->query(); + // + deleteTableQuery << "UPDATE " << META_TABLES << " SET state = " << std::to_string(TableSchema::TO_DELETE) + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTable: " << deleteTableQuery.str(); + + if (!deleteTableQuery.exec()) { + return HandleException("QUERY ERROR WHEN DELETING TABLE", deleteTableQuery.error()); + } + } // Scoped Connection + + if (mode_ == DBOptions::MODE::CLUSTER_WRITABLE) { + DeleteTableFiles(table_id); + } + + ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DELETING TABLE", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) { + try { + server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + // soft delete table files + mysqlpp::Query deleteTableFilesQuery = connectionPtr->query(); + // + deleteTableFilesQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << 
std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) + << " WHERE table_id = " << mysqlpp::quote << table_id << " AND file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTableFiles: " << deleteTableFilesQuery.str(); + + if (!deleteTableFilesQuery.exec()) { + return HandleException("QUERY ERROR WHEN DELETING TABLE FILES", deleteTableFilesQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DELETING TABLE FILES", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) { + if (file_schema.date_ == EmptyDate) { + file_schema.date_ = utils::GetDate(); + } + TableSchema table_schema; + table_schema.table_id_ = file_schema.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + try { + server::MetricCollector metric; + + NextFileId(file_schema.file_id_); + file_schema.dimension_ = table_schema.dimension_; + file_schema.file_size_ = 0; + file_schema.row_count_ = 0; + file_schema.created_on_ = utils::GetMicroSecTimeStamp(); + file_schema.updated_time_ = file_schema.created_on_; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.engine_type_ = table_schema.engine_type_; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; + + std::string id = "NULL"; // auto-increment + std::string table_id = file_schema.table_id_; + std::string engine_type = std::to_string(file_schema.engine_type_); + std::string file_id = file_schema.file_id_; + std::string file_type = std::to_string(file_schema.file_type_); + std::string file_size = std::to_string(file_schema.file_size_); + std::string row_count = std::to_string(file_schema.row_count_); + std::string updated_time = std::to_string(file_schema.updated_time_); + std::string created_on = std::to_string(file_schema.created_on_); + std::string date = std::to_string(file_schema.date_); + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query createTableFileQuery = connectionPtr->query(); + + createTableFileQuery << "INSERT INTO " << META_TABLEFILES << " VALUES(" << id << ", " << mysqlpp::quote + << table_id << ", " << engine_type << ", " << mysqlpp::quote << file_id << ", " + << file_type << ", " << file_size << ", " << row_count << ", " << updated_time << ", " + << created_on << ", " << date << ");"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTableFile: " << createTableFileQuery.str(); + + if (mysqlpp::SimpleResult res = createTableFileQuery.execute()) { + file_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? 
+ + // Consume all results to avoid "Commands out of sync" error + } else { + return HandleException("QUERY ERROR WHEN CREATING TABLE FILE", createTableFileQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; + return utils::CreateTableFilePath(options_, file_schema); + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CREATING TABLE FILE", e.what()); + } +} + // TODO(myh): Delete single vecotor by id Status -MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& dates) { +MySQLMetaImpl::DropDataByDate(const std::string& table_id, const DatesT& dates) { if (dates.empty()) { return Status::OK(); } @@ -354,18 +699,18 @@ MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query dropPartitionsByDatesQuery = connectionPtr->query(); - dropPartitionsByDatesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << "," - << "updated_time = " << utils::GetMicroSecTimeStamp() << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "date in (" << dateListStr << ");"; + dropPartitionsByDatesQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << utils::GetMicroSecTimeStamp() + << " WHERE table_id = " << mysqlpp::quote << table_id << " AND date in (" + << dateListStr << ");"; - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropPartitionsByDates: " << dropPartitionsByDatesQuery.str(); + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropDataByDate: " << dropPartitionsByDatesQuery.str(); if (!dropPartitionsByDatesQuery.exec()) { return HandleException("QUERY ERROR WHEN DROPPING PARTITIONS BY DATES", @@ -373,7 +718,7 @@ MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully drop partitions, table id = " << table_schema.table_id_; + ENGINE_LOG_DEBUG << "Successfully drop data by date, table id = " << table_schema.table_id_; } catch (std::exception& e) { return HandleException("GENERAL ERROR WHEN DROPPING PARTITIONS BY DATES", e.what()); } @@ -381,72 +726,782 @@ MySQLMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& } Status -MySQLMetaImpl::CreateTable(TableSchema& table_schema) { +MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, + TableFilesSchema& table_files) { + if (ids.empty()) { + return Status::OK(); + } + + std::stringstream idSS; + for (auto& id : ids) { + idSS << "id = " << std::to_string(id) << " OR "; + } + std::string idStr = idSS.str(); + idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " + + try { + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query getTableFileQuery = connectionPtr->query(); + getTableFileQuery << "SELECT id, engine_type, file_id, file_type, file_size, row_count, date, created_on" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " 
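GetTableFiles above assembles its id filter as an "id = 1 OR id = 2 ..." chain and trims the trailing " OR ". An equivalent and slightly tidier formulation is a single IN (...) list, sketched here as an alternative rather than code from the patch:

    #include <cstddef>
    #include <sstream>
    #include <string>
    #include <vector>

    // Build "id IN (1, 2, 3)"; selects the same rows as the OR chain and
    // needs no trailing-separator trim.
    std::string
    BuildIdInList(const std::vector<size_t>& ids) {
        std::ostringstream ss;
        ss << "id IN (";
        for (size_t i = 0; i < ids.size(); ++i) {
            if (i > 0) {
                ss << ", ";
            }
            ss << ids[i];
        }
        ss << ")";
        return ss.str();
    }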
AND (" << idStr << ")" + << " AND file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFiles: " << getTableFileQuery.str(); + + res = getTableFileQuery.store(); + } // Scoped Connection + + TableSchema table_schema; + table_schema.table_id_ = table_id; + DescribeTable(table_schema); + + Status ret; + for (auto& resRow : res) { + TableFileSchema file_schema; + file_schema.id_ = resRow["id"]; + file_schema.table_id_ = table_id; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.engine_type_ = resRow["engine_type"]; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; + resRow["file_id"].to_string(file_schema.file_id_); + file_schema.file_type_ = resRow["file_type"]; + file_schema.file_size_ = resRow["file_size"]; + file_schema.row_count_ = resRow["row_count"]; + file_schema.date_ = resRow["date"]; + file_schema.created_on_ = resRow["created_on"]; + file_schema.dimension_ = table_schema.dimension_; + + utils::GetTableFilePath(options_, file_schema); + table_files.emplace_back(file_schema); + } + + ENGINE_LOG_DEBUG << "Get table files by id"; + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN RETRIEVING TABLE FILES", e.what()); + } +} + +Status +MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableIndexParamQuery = connectionPtr->query(); + updateTableIndexParamQuery << "SELECT id, state, dimension, created_on" + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); + + mysqlpp::StoreQueryResult res = updateTableIndexParamQuery.store(); + + if (res.num_rows() == 1) { + const mysqlpp::Row& resRow = res[0]; + + size_t id = resRow["id"]; + int32_t state = resRow["state"]; + uint16_t dimension = resRow["dimension"]; + int64_t created_on = resRow["created_on"]; + + updateTableIndexParamQuery << "UPDATE " << META_TABLES << " SET id = " << id << " ,state = " << state + << " ,dimension = " << dimension << " ,created_on = " << created_on + << " ,engine_type = " << index.engine_type_ << " ,nlist = " << index.nlist_ + << " ,metric_type = " << index.metric_type_ + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); + + if (!updateTableIndexParamQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE INDEX PARAM", + updateTableIndexParamQuery.error()); + } + } else { + return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE INDEX PARAM", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if 
(connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableFlagQuery = connectionPtr->query(); + updateTableFlagQuery << "UPDATE " << META_TABLES << " SET flag = " << flag + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlag: " << updateTableFlagQuery.str(); + + if (!updateTableFlagQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE FLAG", updateTableFlagQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); + } + + return Status::OK(); +} + +// ZR: this function assumes all fields in file_schema have value +Status +MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { + file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); + try { server::MetricCollector metric; { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } - mysqlpp::Query createTableQuery = connectionPtr->query(); + mysqlpp::Query updateTableFileQuery = connectionPtr->query(); - if (table_schema.table_id_.empty()) { - NextTableId(table_schema.table_id_); - } else { - createTableQuery << "SELECT state FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ << ";"; + // if the table has been deleted, just mark the table file as TO_DELETE + // clean thread will delete the file later + updateTableFileQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote + << file_schema.table_id_ << ";"; - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - mysqlpp::StoreQueryResult res = createTableQuery.store(); + mysqlpp::StoreQueryResult res = updateTableFileQuery.store(); - if (res.num_rows() == 1) { - int state = res[0]["state"]; - if (TableSchema::TO_DELETE == state) { - return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); - } else { - return Status(DB_ALREADY_EXIST, "Table already exists"); - } + if (res.num_rows() == 1) { + int state = res[0]["state"]; + if (state == TableSchema::TO_DELETE) { + file_schema.file_type_ = TableFileSchema::TO_DELETE; } + } else { + file_schema.file_type_ = TableFileSchema::TO_DELETE; } - table_schema.id_ = -1; - table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + std::string id = std::to_string(file_schema.id_); + std::string table_id = file_schema.table_id_; + std::string engine_type = std::to_string(file_schema.engine_type_); + std::string file_id = file_schema.file_id_; + std::string file_type = std::to_string(file_schema.file_type_); + std::string file_size = std::to_string(file_schema.file_size_); + std::string row_count = std::to_string(file_schema.row_count_); + std::string updated_time = std::to_string(file_schema.updated_time_); + std::string created_on = std::to_string(file_schema.created_on_); + std::string date = std::to_string(file_schema.date_); - std::string id = "NULL"; // auto-increment - std::string table_id = table_schema.table_id_; - std::string state = std::to_string(table_schema.state_); - std::string 
dimension = std::to_string(table_schema.dimension_); - std::string created_on = std::to_string(table_schema.created_on_); - std::string flag = std::to_string(table_schema.flag_); - std::string index_file_size = std::to_string(table_schema.index_file_size_); - std::string engine_type = std::to_string(table_schema.engine_type_); - std::string nlist = std::to_string(table_schema.nlist_); - std::string metric_type = std::to_string(table_schema.metric_type_); + updateTableFileQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote << table_id + << " ,engine_type = " << engine_type << " ,file_id = " << mysqlpp::quote << file_id + << " ,file_type = " << file_type << " ,file_size = " << file_size + << " ,row_count = " << row_count << " ,updated_time = " << updated_time + << " ,created_on = " << created_on << " ,date = " << date << " WHERE id = " << id + << ";"; - createTableQuery << "INSERT INTO " << META_TABLES << " " - << "VALUES(" << id << ", " << mysqlpp::quote << table_id << ", " << state << ", " - << dimension << ", " << created_on << ", " << flag << ", " << index_file_size << ", " - << engine_type << ", " << nlist << ", " << metric_type << ");"; + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str(); - - if (mysqlpp::SimpleResult res = createTableQuery.execute()) { - table_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? - - // Consume all results to avoid "Commands out of sync" error - } else { - return HandleException("Add Table Error", createTableQuery.error()); + if (!updateTableFileQuery.exec()) { + ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_; + return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error()); } } // Scoped Connection - ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; - return utils::CreateTablePath(options_, table_schema.table_id_); + ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CREATING TABLE", e.what()); + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILE", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { + try { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableFilesToIndexQuery = connectionPtr->query(); + + updateTableFilesToIndexQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::RAW) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesToIndex: " << updateTableFilesToIndexQuery.str(); + + if (!updateTableFilesToIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE TO INDEX", + updateTableFilesToIndexQuery.error()); + } + + ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES TO INDEX", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { + try { + 
server::MetricCollector metric; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query updateTableFilesQuery = connectionPtr->query(); + + std::map has_tables; + for (auto& file_schema : files) { + if (has_tables.find(file_schema.table_id_) != has_tables.end()) { + continue; + } + + updateTableFilesQuery << "SELECT EXISTS" + << " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote + << file_schema.table_id_ << " AND state <> " + << std::to_string(TableSchema::TO_DELETE) << ")" + << " AS " << mysqlpp::quote << "check" + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); + + mysqlpp::StoreQueryResult res = updateTableFilesQuery.store(); + + int check = res[0]["check"]; + has_tables[file_schema.table_id_] = (check == 1); + } + + for (auto& file_schema : files) { + if (!has_tables[file_schema.table_id_]) { + file_schema.file_type_ = TableFileSchema::TO_DELETE; + } + file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); + + std::string id = std::to_string(file_schema.id_); + std::string& table_id = file_schema.table_id_; + std::string engine_type = std::to_string(file_schema.engine_type_); + std::string& file_id = file_schema.file_id_; + std::string file_type = std::to_string(file_schema.file_type_); + std::string file_size = std::to_string(file_schema.file_size_); + std::string row_count = std::to_string(file_schema.row_count_); + std::string updated_time = std::to_string(file_schema.updated_time_); + std::string created_on = std::to_string(file_schema.created_on_); + std::string date = std::to_string(file_schema.date_); + + updateTableFilesQuery << "UPDATE " << META_TABLEFILES << " SET table_id = " << mysqlpp::quote + << table_id << " ,engine_type = " << engine_type + << " ,file_id = " << mysqlpp::quote << file_id << " ,file_type = " << file_type + << " ,file_size = " << file_size << " ,row_count = " << row_count + << " ,updated_time = " << updated_time << " ,created_on = " << created_on + << " ,date = " << date << " WHERE id = " << id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); + + if (!updateTableFilesQuery.exec()) { + return HandleException("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error()); + } + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query describeTableIndexQuery = connectionPtr->query(); + describeTableIndexQuery << "SELECT engine_type, nlist, index_file_size, metric_type" + << " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTableIndex: " << describeTableIndexQuery.str(); + + mysqlpp::StoreQueryResult res = describeTableIndexQuery.store(); + + if (res.num_rows() == 1) { + const mysqlpp::Row& resRow 
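UpdateTableFiles above issues one EXISTS query per distinct table and caches the answer in a map, so a batch touching many files of the same table checks it only once; files whose owner is gone are downgraded to TO_DELETE for the cleanup thread. The same shape in a self-contained sketch, with the SQL hidden behind a callback:

    #include <functional>
    #include <map>
    #include <string>
    #include <vector>

    struct File {
        std::string table_id_;
        bool to_delete_ = false;
    };

    // One existence probe per distinct table, cached in has_table.
    void
    MarkOrphans(std::vector<File>& files, const std::function<bool(const std::string&)>& table_exists) {
        std::map<std::string, bool> has_table;
        for (auto& f : files) {
            auto it = has_table.find(f.table_id_);
            if (it == has_table.end()) {
                it = has_table.emplace(f.table_id_, table_exists(f.table_id_)).first;
            }
            if (!it->second) {
                f.to_delete_ = true;  // owning table is gone: mark file for cleanup
            }
        }
    }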
= res[0]; + + index.engine_type_ = resRow["engine_type"]; + index.nlist_ = resRow["nlist"]; + index.metric_type_ = resRow["metric_type"]; + } else { + return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); + } + } // Scoped Connection + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::DropTableIndex(const std::string& table_id) { + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query dropTableIndexQuery = connectionPtr->query(); + + // soft delete index files + dropTableIndexQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " ,updated_time = " << utils::GetMicroSecTimeStamp() + << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::INDEX) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); + + if (!dropTableIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); + } + + // set all backup file to raw + dropTableIndexQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::RAW) + << " ,updated_time = " << utils::GetMicroSecTimeStamp() + << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::BACKUP) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); + + if (!dropTableIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); + } + + // set table index type to raw + dropTableIndexQuery << "UPDATE " << META_TABLES + << " SET engine_type = " << std::to_string(DEFAULT_ENGINE_TYPE) + << " ,nlist = " << std::to_string(DEFAULT_NLIST) + << " ,metric_type = " << std::to_string(DEFAULT_METRIC_TYPE) + << " WHERE table_id = " << mysqlpp::quote << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); + + if (!dropTableIndexQuery.exec()) { + return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); + } + } // Scoped Connection + + ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN DROPPING TABLE INDEX", e.what()); + } + + return Status::OK(); +} + +Status +MySQLMetaImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) { + server::MetricCollector metric; + + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + // not allow create partition under partition + if (!table_schema.owner_table_.empty()) { + return Status(DB_ERROR, "Nested partition is not allow"); + } + + if (partition_name == "") { + // not allow duplicated partition + std::string exist_partition; + GetPartitionName(table_id, tag, exist_partition); + if (!exist_partition.empty()) { + return Status(DB_ERROR, "Duplicated partition is not allow"); + } + + NextTableId(table_schema.table_id_); + } else { + table_schema.table_id_ = partition_name; + } + + 
table_schema.id_ = -1;
+    table_schema.flag_ = 0;
+    table_schema.created_on_ = utils::GetMicroSecTimeStamp();
+    table_schema.owner_table_ = table_id;
+    table_schema.partition_tag_ = tag;
+
+    return CreateTable(table_schema);
+}
+
+Status
+MySQLMetaImpl::DropPartition(const std::string& partition_name) {
+    return DropTable(partition_name);
+}
+
+Status
+MySQLMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) {
+    try {
+        server::MetricCollector metric;
+        mysqlpp::StoreQueryResult res;
+        {
+            mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
+
+            if (connectionPtr == nullptr) {
+                return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
+            }
+
+            mysqlpp::Query allPartitionsQuery = connectionPtr->query();
+            allPartitionsQuery << "SELECT table_id FROM " << META_TABLES << " WHERE owner_table = " << mysqlpp::quote
+                               << table_id << " AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";";
+
+            ENGINE_LOG_DEBUG << "MySQLMetaImpl::ShowPartitions: " << allPartitionsQuery.str();
+
+            res = allPartitionsQuery.store();
+        }  // Scoped Connection
+
+        for (auto& resRow : res) {
+            meta::TableSchema partition_schema;
+            resRow["table_id"].to_string(partition_schema.table_id_);
+            DescribeTable(partition_schema);
+            partiton_schema_array.emplace_back(partition_schema);
+        }
+    } catch (std::exception& e) {
+        return HandleException("GENERAL ERROR WHEN SHOWING PARTITIONS", e.what());
+    }
+
+    return Status::OK();
+}
+
+Status
+MySQLMetaImpl::GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) {
+    try {
+        server::MetricCollector metric;
+        mysqlpp::StoreQueryResult res;
+        {
+            mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
+
+            if (connectionPtr == nullptr) {
+                return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
+            }
+
+            mysqlpp::Query allPartitionsQuery = connectionPtr->query();
+            allPartitionsQuery << "SELECT table_id FROM " << META_TABLES << " WHERE owner_table = " << mysqlpp::quote
+                               << table_id << " AND partition_tag = " << mysqlpp::quote << tag << " AND state <> "
+                               << std::to_string(TableSchema::TO_DELETE) << ";";
+
+            ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetPartitionName: " << allPartitionsQuery.str();
+
+            res = allPartitionsQuery.store();
+        }  // Scoped Connection
+
+        if (res.num_rows() > 0) {
+            const mysqlpp::Row& resRow = res[0];
+            resRow["table_id"].to_string(partition_name);
+        } else {
+            return Status(DB_NOT_FOUND, "Partition " + tag + " of table " + table_id + " not found");
+        }
+    } catch (std::exception& e) {
+        return HandleException("GENERAL ERROR WHEN GETTING PARTITION NAME", e.what());
+    }
+
+    return Status::OK();
+}
+
+Status
+MySQLMetaImpl::FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
+                             DatePartionedTableFilesSchema& files) {
+    files.clear();
+
+    try {
+        server::MetricCollector metric;
+        mysqlpp::StoreQueryResult res;
+        {
+            mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
+
+            if (connectionPtr == nullptr) {
+                return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
+            }
+
+            mysqlpp::Query filesToSearchQuery = connectionPtr->query();
+            filesToSearchQuery << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date"
+                               << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id;
+
+            if (!dates.empty()) {
+                std::stringstream partitionListSS;
+                for (auto& date : dates) {
+                    partitionListSS << std::to_string(date) << ", ";
+                }
+
std::string partitionListStr = partitionListSS.str(); + + partitionListStr = partitionListStr.substr(0, partitionListStr.size() - 2); // remove the last ", " + filesToSearchQuery << " AND date IN (" << partitionListStr << ")"; + } + + if (!ids.empty()) { + std::stringstream idSS; + for (auto& id : ids) { + idSS << "id = " << std::to_string(id) << " OR "; + } + std::string idStr = idSS.str(); + idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " + + filesToSearchQuery << " AND (" << idStr << ")"; + } + // End + filesToSearchQuery << " AND" + << " (file_type = " << std::to_string(TableFileSchema::RAW) + << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToSearch: " << filesToSearchQuery.str(); + + res = filesToSearchQuery.store(); + } // Scoped Connection + + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + Status ret; + TableFileSchema table_file; + for (auto& resRow : res) { + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + table_file.index_file_size_ = table_schema.index_file_size_; + table_file.engine_type_ = resRow["engine_type"]; + table_file.nlist_ = table_schema.nlist_; + table_file.metric_type_ = table_schema.metric_type_; + resRow["file_id"].to_string(table_file.file_id_); + table_file.file_type_ = resRow["file_type"]; + table_file.file_size_ = resRow["file_size"]; + table_file.row_count_ = resRow["row_count"]; + table_file.date_ = resRow["date"]; + table_file.dimension_ = table_schema.dimension_; + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + + auto dateItr = files.find(table_file.date_); + if (dateItr == files.end()) { + files[table_file.date_] = TableFilesSchema(); + } + + files[table_file.date_].push_back(table_file); + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-search files"; + } + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO SEARCH", e.what()); + } +} + +Status +MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) { + files.clear(); + + try { + server::MetricCollector metric; + + // check table existence + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query filesToMergeQuery = connectionPtr->query(); + filesToMergeQuery + << "SELECT id, table_id, file_id, file_type, file_size, row_count, date, engine_type, created_on" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type = " << std::to_string(TableFileSchema::RAW) << " ORDER BY row_count DESC;"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToMerge: " << filesToMergeQuery.str(); + + res = filesToMergeQuery.store(); + } // Scoped Connection + + Status ret; + for (auto& resRow : res) { + TableFileSchema table_file; + table_file.file_size_ = resRow["file_size"]; + if (table_file.file_size_ >= 
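FilesToSearch groups its hits into a date-to-files map. Since std::map::operator[] default-constructs a missing entry on first access, the explicit find-then-insert in the loop above can collapse to a single line, as in this sketch with toy stand-ins for DateT, TableFileSchema and DatePartionedTableFilesSchema:

    #include <map>
    #include <vector>

    typedef int DateT;
    struct TableFile {
        DateT date_;
    };
    typedef std::map<DateT, std::vector<TableFile>> DatePartitionedFiles;

    // Group files by date; operator[] inserts an empty vector on first use,
    // so no explicit find/insert dance is needed.
    void
    GroupByDate(const std::vector<TableFile>& files, DatePartitionedFiles& grouped) {
        for (auto& file : files) {
            grouped[file.date_].push_back(file);
        }
    }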
table_schema.index_file_size_) { + continue; // skip large file + } + + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + resRow["file_id"].to_string(table_file.file_id_); + table_file.file_type_ = resRow["file_type"]; + table_file.row_count_ = resRow["row_count"]; + table_file.date_ = resRow["date"]; + table_file.index_file_size_ = table_schema.index_file_size_; + table_file.engine_type_ = resRow["engine_type"]; + table_file.nlist_ = table_schema.nlist_; + table_file.metric_type_ = table_schema.metric_type_; + table_file.created_on_ = resRow["created_on"]; + table_file.dimension_ = table_schema.dimension_; + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + + auto dateItr = files.find(table_file.date_); + if (dateItr == files.end()) { + files[table_file.date_] = TableFilesSchema(); + } + + files[table_file.date_].push_back(table_file); + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-merge files"; + } + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO MERGE", e.what()); + } +} + +Status +MySQLMetaImpl::FilesToIndex(TableFilesSchema& files) { + files.clear(); + + try { + server::MetricCollector metric; + mysqlpp::StoreQueryResult res; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query filesToIndexQuery = connectionPtr->query(); + filesToIndexQuery + << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date, created_on" + << " FROM " << META_TABLEFILES << " WHERE file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToIndex: " << filesToIndexQuery.str(); + + res = filesToIndexQuery.store(); + } // Scoped Connection + + Status ret; + std::map groups; + TableFileSchema table_file; + for (auto& resRow : res) { + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + table_file.engine_type_ = resRow["engine_type"]; + resRow["file_id"].to_string(table_file.file_id_); + table_file.file_type_ = resRow["file_type"]; + table_file.file_size_ = resRow["file_size"]; + table_file.row_count_ = resRow["row_count"]; + table_file.date_ = resRow["date"]; + table_file.created_on_ = resRow["created_on"]; + + auto groupItr = groups.find(table_file.table_id_); + if (groupItr == groups.end()) { + TableSchema table_schema; + table_schema.table_id_ = table_file.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + groups[table_file.table_id_] = table_schema; + } + table_file.dimension_ = groups[table_file.table_id_].dimension_; + table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; + table_file.nlist_ = groups[table_file.table_id_].nlist_; + table_file.metric_type_ = groups[table_file.table_id_].metric_type_; + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + + files.push_back(table_file); + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-index files"; + } + return ret; + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO INDEX", e.what()); } } @@ -465,7 +1520,7 @@ MySQLMetaImpl::FilesByType(const 
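FilesToMerge considers RAW files from largest row count down (the query orders by row_count DESC) and skips any file that already reached the table's index_file_size threshold. The same selection rule in a standalone sketch, with the sort included so it runs on unordered input; FileInfo is a made-up type:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct FileInfo {
        int64_t id_;
        int64_t file_size_;
        int64_t row_count_;
    };

    // Pick merge candidates the way FilesToMerge does; input is assumed to be
    // pre-filtered to RAW files.
    std::vector<FileInfo>
    MergeCandidates(std::vector<FileInfo> raw_files, int64_t index_file_size) {
        std::sort(raw_files.begin(), raw_files.end(),
                  [](const FileInfo& a, const FileInfo& b) { return a.row_count_ > b.row_count_; });
        std::vector<FileInfo> candidates;
        for (auto& f : raw_files) {
            if (f.file_size_ >= index_file_size) {
                continue;  // already large enough to become an index file on its own
            }
            candidates.push_back(f);
        }
        return candidates;
    }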
std::string& table_id, const std::vector& mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } std::string types; @@ -478,9 +1533,9 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector& mysqlpp::Query hasNonIndexFilesQuery = connectionPtr->query(); // since table_id is a unique column we just need to check whether it exists or not - hasNonIndexFilesQuery << "SELECT file_id, file_type FROM " << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type in (" << types << ");"; + hasNonIndexFilesQuery << "SELECT file_id, file_type" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND file_type in (" << types << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesByType: " << hasNonIndexFilesQuery.str(); @@ -535,854 +1590,6 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector& return Status::OK(); } -Status -MySQLMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& index) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableIndexParamQuery = connectionPtr->query(); - updateTableIndexParamQuery << "SELECT id, state, dimension, created_on FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); - - mysqlpp::StoreQueryResult res = updateTableIndexParamQuery.store(); - - if (res.num_rows() == 1) { - const mysqlpp::Row& resRow = res[0]; - - size_t id = resRow["id"]; - int32_t state = resRow["state"]; - uint16_t dimension = resRow["dimension"]; - int64_t created_on = resRow["created_on"]; - - updateTableIndexParamQuery << "UPDATE " << META_TABLES << " " - << "SET id = " << id << ", " - << "state = " << state << ", " - << "dimension = " << dimension << ", " - << "created_on = " << created_on << ", " - << "engine_type = " << index.engine_type_ << ", " - << "nlist = " << index.nlist_ << ", " - << "metric_type = " << index.metric_type_ << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str(); - - if (!updateTableIndexParamQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE INDEX PARAM", - updateTableIndexParamQuery.error()); - } - } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE INDEX PARAM", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFlagQuery = 
connectionPtr->query(); - updateTableFlagQuery << "UPDATE " << META_TABLES << " " - << "SET flag = " << flag << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlag: " << updateTableFlagQuery.str(); - - if (!updateTableFlagQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE FLAG", updateTableFlagQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& index) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query describeTableIndexQuery = connectionPtr->query(); - describeTableIndexQuery << "SELECT engine_type, nlist, index_file_size, metric_type FROM " << META_TABLES - << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTableIndex: " << describeTableIndexQuery.str(); - - mysqlpp::StoreQueryResult res = describeTableIndexQuery.store(); - - if (res.num_rows() == 1) { - const mysqlpp::Row& resRow = res[0]; - - index.engine_type_ = resRow["engine_type"]; - index.nlist_ = resRow["nlist"]; - index.metric_type_ = resRow["metric_type"]; - } else { - return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); - } - } // Scoped Connection - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FLAG", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DropTableIndex(const std::string& table_id) { - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query dropTableIndexQuery = connectionPtr->query(); - - // soft delete index files - dropTableIndexQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << "," - << "updated_time = " << utils::GetMicroSecTimeStamp() << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::INDEX) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); - - if (!dropTableIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); - } - - // set all backup file to raw - dropTableIndexQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::RAW) << "," - << "updated_time = " << utils::GetMicroSecTimeStamp() << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::BACKUP) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); - - if (!dropTableIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); - } - - // set table index type to raw - dropTableIndexQuery << "UPDATE " << 
META_TABLES << " " - << "SET engine_type = " << std::to_string(DEFAULT_ENGINE_TYPE) << "," - << "nlist = " << std::to_string(DEFAULT_NLIST) << ", " - << "metric_type = " << std::to_string(DEFAULT_METRIC_TYPE) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str(); - - if (!dropTableIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DROPPING TABLE INDEX", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DeleteTable(const std::string& table_id) { - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - // soft delete table - mysqlpp::Query deleteTableQuery = connectionPtr->query(); - // - deleteTableQuery << "UPDATE " << META_TABLES << " " - << "SET state = " << std::to_string(TableSchema::TO_DELETE) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTable: " << deleteTableQuery.str(); - - if (!deleteTableQuery.exec()) { - return HandleException("QUERY ERROR WHEN DELETING TABLE", deleteTableQuery.error()); - } - } // Scoped Connection - - if (mode_ == DBOptions::MODE::CLUSTER_WRITABLE) { - DeleteTableFiles(table_id); - } - - ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DELETING TABLE", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DeleteTableFiles(const std::string& table_id) { - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - // soft delete table files - mysqlpp::Query deleteTableFilesQuery = connectionPtr->query(); - // - deleteTableFilesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << ", " - << "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DeleteTableFiles: " << deleteTableFilesQuery.str(); - - if (!deleteTableFilesQuery.exec()) { - return HandleException("QUERY ERROR WHEN DELETING TABLE FILES", deleteTableFilesQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DELETING TABLE FILES", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::DescribeTable(TableSchema& table_schema) { - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query describeTableQuery = connectionPtr->query(); - describeTableQuery - << 
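
None of the drop paths above DELETE rows outright: DropTableIndex tombstones built index files and demotes backups, while DeleteTable and DeleteTableFiles only flip rows to TO_DELETE and leave physical removal to the TTL clean-up pass. The file-type transitions condensed into one function (the enum is an illustrative subset; the real constants live in TableFileSchema):

    // Illustrative subset of the file states kept in the meta tables.
    enum FileType { RAW, TO_INDEX, INDEX, BACKUP, TO_DELETE };

    // Effect of DropTableIndex on a single file row, expressed as a pure function.
    static FileType OnDropTableIndex(FileType current) {
        switch (current) {
            case INDEX:  return TO_DELETE;  // soft-delete built index files
            case BACKUP: return RAW;        // backup files become raw again
            default:     return current;    // raw / to-index files are untouched
        }
    }
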
"SELECT id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type " - << " FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_schema.table_id_ << " " - << "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTable: " << describeTableQuery.str(); - - res = describeTableQuery.store(); - } // Scoped Connection - - if (res.num_rows() == 1) { - const mysqlpp::Row& resRow = res[0]; - - table_schema.id_ = resRow["id"]; // implicit conversion - - table_schema.state_ = resRow["state"]; - - table_schema.dimension_ = resRow["dimension"]; - - table_schema.created_on_ = resRow["created_on"]; - - table_schema.flag_ = resRow["flag"]; - - table_schema.index_file_size_ = resRow["index_file_size"]; - - table_schema.engine_type_ = resRow["engine_type"]; - - table_schema.nlist_ = resRow["nlist"]; - - table_schema.metric_type_ = resRow["metric_type"]; - } else { - return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); - } - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DESCRIBING TABLE", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query hasTableQuery = connectionPtr->query(); - // since table_id is a unique column we just need to check whether it exists or not - hasTableQuery << "SELECT EXISTS " - << "(SELECT 1 FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " " - << "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ") " - << "AS " << mysqlpp::quote << "check" - << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::HasTable: " << hasTableQuery.str(); - - res = hasTableQuery.store(); - } // Scoped Connection - - int check = res[0]["check"]; - has_or_not = (check == 1); - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CHECKING IF TABLE EXISTS", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::AllTables(std::vector& table_schema_array) { - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query allTablesQuery = connectionPtr->query(); - allTablesQuery << "SELECT id, table_id, dimension, engine_type, nlist, index_file_size, metric_type FROM " - << META_TABLES << " " - << "WHERE state <> " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allTablesQuery.str(); - - res = allTablesQuery.store(); - } // Scoped Connection - - for (auto& resRow : res) { - TableSchema table_schema; - - table_schema.id_ = resRow["id"]; // implicit conversion - - std::string table_id; - resRow["table_id"].to_string(table_id); - table_schema.table_id_ = table_id; - - table_schema.dimension_ = resRow["dimension"]; - - table_schema.index_file_size_ = resRow["index_file_size"]; - - table_schema.engine_type_ = resRow["engine_type"]; - - table_schema.nlist_ = resRow["nlist"]; - - table_schema.metric_type_ = resRow["metric_type"]; - - 
table_schema_array.emplace_back(table_schema); - } - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DESCRIBING ALL TABLES", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::CreateTableFile(TableFileSchema& file_schema) { - if (file_schema.date_ == EmptyDate) { - file_schema.date_ = utils::GetDate(); - } - TableSchema table_schema; - table_schema.table_id_ = file_schema.table_id_; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - - try { - server::MetricCollector metric; - - NextFileId(file_schema.file_id_); - file_schema.dimension_ = table_schema.dimension_; - file_schema.file_size_ = 0; - file_schema.row_count_ = 0; - file_schema.created_on_ = utils::GetMicroSecTimeStamp(); - file_schema.updated_time_ = file_schema.created_on_; - file_schema.index_file_size_ = table_schema.index_file_size_; - file_schema.engine_type_ = table_schema.engine_type_; - file_schema.nlist_ = table_schema.nlist_; - file_schema.metric_type_ = table_schema.metric_type_; - - std::string id = "NULL"; // auto-increment - std::string table_id = file_schema.table_id_; - std::string engine_type = std::to_string(file_schema.engine_type_); - std::string file_id = file_schema.file_id_; - std::string file_type = std::to_string(file_schema.file_type_); - std::string file_size = std::to_string(file_schema.file_size_); - std::string row_count = std::to_string(file_schema.row_count_); - std::string updated_time = std::to_string(file_schema.updated_time_); - std::string created_on = std::to_string(file_schema.created_on_); - std::string date = std::to_string(file_schema.date_); - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query createTableFileQuery = connectionPtr->query(); - - createTableFileQuery << "INSERT INTO " << META_TABLEFILES << " " - << "VALUES(" << id << ", " << mysqlpp::quote << table_id << ", " << engine_type << ", " - << mysqlpp::quote << file_id << ", " << file_type << ", " << file_size << ", " - << row_count << ", " << updated_time << ", " << created_on << ", " << date << ");"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTableFile: " << createTableFileQuery.str(); - - if (mysqlpp::SimpleResult res = createTableFileQuery.execute()) { - file_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()? 
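
On the question left in the comment above: mysqlpp's SimpleResult::insert_id() reads the auto-increment id from the same connection that ran the INSERT, which is what SELECT LAST_INSERT_ID() would return on that connection, so a second query should only be needed if the id had to be read elsewhere. A sketch of the pattern (hypothetical helper; assumes a mysqlpp::Query already holding the INSERT):

    #include <cstdint>
    #include <stdexcept>
    #include <mysql++/mysql++.h>

    // Execute an INSERT and hand back the row's AUTO_INCREMENT id.
    static uint64_t InsertAndGetId(mysqlpp::Query& insert_query) {
        mysqlpp::SimpleResult res = insert_query.execute();
        if (!res) {
            throw std::runtime_error(insert_query.error());
        }
        return res.insert_id();  // per-connection, same source as LAST_INSERT_ID()
    }
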
- - // Consume all results to avoid "Commands out of sync" error - } else { - return HandleException("QUERY ERROR WHEN CREATING TABLE FILE", createTableFileQuery.error()); - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_; - return utils::CreateTableFilePath(options_, file_schema); - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CREATING TABLE FILE", e.what()); - } -} - -Status -MySQLMetaImpl::FilesToIndex(TableFilesSchema& files) { - files.clear(); - - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query filesToIndexQuery = connectionPtr->query(); - filesToIndexQuery - << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date, created_on FROM " - << META_TABLEFILES << " " - << "WHERE file_type = " << std::to_string(TableFileSchema::TO_INDEX) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToIndex: " << filesToIndexQuery.str(); - - res = filesToIndexQuery.store(); - } // Scoped Connection - - Status ret; - std::map groups; - TableFileSchema table_file; - for (auto& resRow : res) { - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id; - resRow["table_id"].to_string(table_id); - table_file.table_id_ = table_id; - - table_file.engine_type_ = resRow["engine_type"]; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.file_type_ = resRow["file_type"]; - - table_file.file_size_ = resRow["file_size"]; - - table_file.row_count_ = resRow["row_count"]; - - table_file.date_ = resRow["date"]; - - table_file.created_on_ = resRow["created_on"]; - - auto groupItr = groups.find(table_file.table_id_); - if (groupItr == groups.end()) { - TableSchema table_schema; - table_schema.table_id_ = table_file.table_id_; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - groups[table_file.table_id_] = table_schema; - } - table_file.dimension_ = groups[table_file.table_id_].dimension_; - table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; - table_file.nlist_ = groups[table_file.table_id_].nlist_; - table_file.metric_type_ = groups[table_file.table_id_].metric_type_; - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - - files.push_back(table_file); - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-index files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO INDEX", e.what()); - } -} - -Status -MySQLMetaImpl::FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, - DatePartionedTableFilesSchema& files) { - files.clear(); - - try { - server::MetricCollector metric; - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query filesToSearchQuery = connectionPtr->query(); - filesToSearchQuery - << "SELECT id, table_id, engine_type, file_id, file_type, file_size, row_count, date FROM " - << META_TABLEFILES << " " - << "WHERE table_id = " << 
mysqlpp::quote << table_id; - - if (!dates.empty()) { - std::stringstream partitionListSS; - for (auto& date : dates) { - partitionListSS << std::to_string(date) << ", "; - } - std::string partitionListStr = partitionListSS.str(); - - partitionListStr = partitionListStr.substr(0, partitionListStr.size() - 2); // remove the last ", " - filesToSearchQuery << " AND " - << "date IN (" << partitionListStr << ")"; - } - - if (!ids.empty()) { - std::stringstream idSS; - for (auto& id : ids) { - idSS << "id = " << std::to_string(id) << " OR "; - } - std::string idStr = idSS.str(); - idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " - - filesToSearchQuery << " AND " - << "(" << idStr << ")"; - } - // End - filesToSearchQuery << " AND " - << "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " - << "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " - << "file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToSearch: " << filesToSearchQuery.str(); - - res = filesToSearchQuery.store(); - } // Scoped Connection - - TableSchema table_schema; - table_schema.table_id_ = table_id; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - - Status ret; - TableFileSchema table_file; - for (auto& resRow : res) { - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id_str; - resRow["table_id"].to_string(table_id_str); - table_file.table_id_ = table_id_str; - - table_file.index_file_size_ = table_schema.index_file_size_; - - table_file.engine_type_ = resRow["engine_type"]; - - table_file.nlist_ = table_schema.nlist_; - - table_file.metric_type_ = table_schema.metric_type_; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.file_type_ = resRow["file_type"]; - - table_file.file_size_ = resRow["file_size"]; - - table_file.row_count_ = resRow["row_count"]; - - table_file.date_ = resRow["date"]; - - table_file.dimension_ = table_schema.dimension_; - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - - auto dateItr = files.find(table_file.date_); - if (dateItr == files.end()) { - files[table_file.date_] = TableFilesSchema(); - } - - files[table_file.date_].push_back(table_file); - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-search files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO SEARCH", e.what()); - } -} - -Status -MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) { - files.clear(); - - try { - server::MetricCollector metric; - - // check table existence - TableSchema table_schema; - table_schema.table_id_ = table_id; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query filesToMergeQuery = connectionPtr->query(); - filesToMergeQuery - << "SELECT id, table_id, file_id, file_type, file_size, row_count, date, engine_type, created_on FROM " - << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::RAW) << " " - << "ORDER 
BY row_count DESC" - << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::FilesToMerge: " << filesToMergeQuery.str(); - - res = filesToMergeQuery.store(); - } // Scoped Connection - - Status ret; - for (auto& resRow : res) { - TableFileSchema table_file; - table_file.file_size_ = resRow["file_size"]; - if (table_file.file_size_ >= table_schema.index_file_size_) { - continue; // skip large file - } - - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id_str; - resRow["table_id"].to_string(table_id_str); - table_file.table_id_ = table_id_str; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.file_type_ = resRow["file_type"]; - - table_file.row_count_ = resRow["row_count"]; - - table_file.date_ = resRow["date"]; - - table_file.index_file_size_ = table_schema.index_file_size_; - - table_file.engine_type_ = resRow["engine_type"]; - - table_file.nlist_ = table_schema.nlist_; - - table_file.metric_type_ = table_schema.metric_type_; - - table_file.created_on_ = resRow["created_on"]; - - table_file.dimension_ = table_schema.dimension_; - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - - auto dateItr = files.find(table_file.date_); - if (dateItr == files.end()) { - files[table_file.date_] = TableFilesSchema(); - } - - files[table_file.date_].push_back(table_file); - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-merge files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN FINDING TABLE FILES TO MERGE", e.what()); - } -} - -Status -MySQLMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, - TableFilesSchema& table_files) { - if (ids.empty()) { - return Status::OK(); - } - - std::stringstream idSS; - for (auto& id : ids) { - idSS << "id = " << std::to_string(id) << " OR "; - } - std::string idStr = idSS.str(); - idStr = idStr.substr(0, idStr.size() - 4); // remove the last " OR " - - try { - mysqlpp::StoreQueryResult res; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query getTableFileQuery = connectionPtr->query(); - getTableFileQuery - << "SELECT id, engine_type, file_id, file_type, file_size, row_count, date, created_on FROM " - << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "(" << idStr << ") AND " - << "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFiles: " << getTableFileQuery.str(); - - res = getTableFileQuery.store(); - } // Scoped Connection - - TableSchema table_schema; - table_schema.table_id_ = table_id; - DescribeTable(table_schema); - - Status ret; - for (auto& resRow : res) { - TableFileSchema file_schema; - - file_schema.id_ = resRow["id"]; - - file_schema.table_id_ = table_id; - - file_schema.index_file_size_ = table_schema.index_file_size_; - - file_schema.engine_type_ = resRow["engine_type"]; - - file_schema.nlist_ = table_schema.nlist_; - - file_schema.metric_type_ = table_schema.metric_type_; - - std::string file_id; - resRow["file_id"].to_string(file_id); - file_schema.file_id_ = file_id; - - file_schema.file_type_ = resRow["file_type"]; - - file_schema.file_size_ = resRow["file_size"]; - - file_schema.row_count_ = resRow["row_count"]; - - file_schema.date_ = 
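
GetTableFiles, like FilesToSearch above it and DiscardFiles later, expands an id list into "id = a OR id = b" and then snips the dangling separator. The same predicate built without the trim, as a hypothetical helper:

    #include <sstream>
    #include <string>
    #include <vector>

    // Build "id = 1 OR id = 2" with no trailing " OR ".
    static std::string JoinIdPredicate(const std::vector<size_t>& ids) {
        std::ostringstream oss;
        for (size_t i = 0; i < ids.size(); ++i) {
            if (i != 0) {
                oss << " OR ";
            }
            oss << "id = " << ids[i];
        }
        return oss.str();
    }
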
resRow["date"]; - - file_schema.created_on_ = resRow["created_on"]; - - file_schema.dimension_ = table_schema.dimension_; - - utils::GetTableFilePath(options_, file_schema); - - table_files.emplace_back(file_schema); - } - - ENGINE_LOG_DEBUG << "Get table files by id"; - return ret; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN RETRIEVING TABLE FILES", e.what()); - } -} - // TODO(myh): Support swap to cloud storage Status MySQLMetaImpl::Archive() { @@ -1402,14 +1609,14 @@ MySQLMetaImpl::Archive() { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query archiveQuery = connectionPtr->query(); - archiveQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " " - << "WHERE created_on < " << std::to_string(now - usecs) << " AND " - << "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; + archiveQuery << "UPDATE " << META_TABLEFILES + << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " WHERE created_on < " << std::to_string(now - usecs) << " AND file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Archive: " << archiveQuery.str(); @@ -1446,12 +1653,13 @@ MySQLMetaImpl::Size(uint64_t& result) { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query getSizeQuery = connectionPtr->query(); - getSizeQuery << "SELECT IFNULL(SUM(file_size),0) AS sum FROM " << META_TABLEFILES << " " - << "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";"; + getSizeQuery << "SELECT IFNULL(SUM(file_size),0) AS sum" + << " FROM " << META_TABLEFILES << " WHERE file_type <> " + << std::to_string(TableFileSchema::TO_DELETE) << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Size: " << getSizeQuery.str(); @@ -1470,434 +1678,20 @@ MySQLMetaImpl::Size(uint64_t& result) { return Status::OK(); } -Status -MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { - if (to_discard_size <= 0) { - return Status::OK(); - } - ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; - - try { - server::MetricCollector metric; - bool status; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query discardFilesQuery = connectionPtr->query(); - discardFilesQuery << "SELECT id, file_size FROM " << META_TABLEFILES << " " - << "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << " " - << "ORDER BY id ASC " - << "LIMIT 10;"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); - - mysqlpp::StoreQueryResult res = discardFilesQuery.store(); - if (res.num_rows() == 0) { - return Status::OK(); - } - - TableFileSchema table_file; - std::stringstream idsToDiscardSS; - for (auto& resRow : res) { - if (to_discard_size <= 0) { - break; - } - table_file.id_ = resRow["id"]; - table_file.file_size_ = resRow["file_size"]; - idsToDiscardSS << "id = " << std::to_string(table_file.id_) << " OR "; - ENGINE_LOG_DEBUG << "Discard table_file.id=" << 
table_file.file_id_ - << " table_file.size=" << table_file.file_size_; - to_discard_size -= table_file.file_size_; - } - - std::string idsToDiscardStr = idsToDiscardSS.str(); - idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4); // remove the last " OR " - - discardFilesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << ", " - << "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " " - << "WHERE " << idsToDiscardStr << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str(); - - status = discardFilesQuery.exec(); - if (!status) { - return HandleException("QUERY ERROR WHEN DISCARDING FILES", discardFilesQuery.error()); - } - } // Scoped Connection - - return DiscardFiles(to_discard_size); - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN DISCARDING FILES", e.what()); - } -} - -// ZR: this function assumes all fields in file_schema have value -Status -MySQLMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { - file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); - - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFileQuery = connectionPtr->query(); - - // if the table has been deleted, just mark the table file as TO_DELETE - // clean thread will delete the file later - updateTableFileQuery << "SELECT state FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << file_schema.table_id_ << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - - mysqlpp::StoreQueryResult res = updateTableFileQuery.store(); - - if (res.num_rows() == 1) { - int state = res[0]["state"]; - if (state == TableSchema::TO_DELETE) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - } else { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - - std::string id = std::to_string(file_schema.id_); - std::string table_id = file_schema.table_id_; - std::string engine_type = std::to_string(file_schema.engine_type_); - std::string file_id = file_schema.file_id_; - std::string file_type = std::to_string(file_schema.file_type_); - std::string file_size = std::to_string(file_schema.file_size_); - std::string row_count = std::to_string(file_schema.row_count_); - std::string updated_time = std::to_string(file_schema.updated_time_); - std::string created_on = std::to_string(file_schema.created_on_); - std::string date = std::to_string(file_schema.date_); - - updateTableFileQuery << "UPDATE " << META_TABLEFILES << " " - << "SET table_id = " << mysqlpp::quote << table_id << ", " - << "engine_type = " << engine_type << ", " - << "file_id = " << mysqlpp::quote << file_id << ", " - << "file_type = " << file_type << ", " - << "file_size = " << file_size << ", " - << "row_count = " << row_count << ", " - << "updated_time = " << updated_time << ", " - << "created_on = " << created_on << ", " - << "date = " << date << " " - << "WHERE id = " << id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str(); - - if (!updateTableFileQuery.exec()) { - ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_; - return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error()); - } - } 
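
UpdateTableFile first consults the owning table's state: if the table row is missing or already marked TO_DELETE, the file being flushed is itself retagged TO_DELETE so the clean-up thread reaps it rather than letting the update resurrect an orphan. The guard reduced to its decision (state values are illustrative):

    // Illustrative table state, mirroring TableSchema's constants.
    enum class TableState { NORMAL, TO_DELETE };

    // Keep the caller's file type only while the parent table is still alive.
    static int ResolveFileType(bool table_found, TableState state,
                               int requested_type, int to_delete_type) {
        if (!table_found || state == TableState::TO_DELETE) {
            return to_delete_type;  // orphaned file: tombstone it
        }
        return requested_type;
    }
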
// Scoped Connection - - ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILE", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { - try { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFilesToIndexQuery = connectionPtr->query(); - - updateTableFilesToIndexQuery << "UPDATE " << META_TABLEFILES << " " - << "SET file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "file_type = " << std::to_string(TableFileSchema::RAW) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesToIndex: " << updateTableFilesToIndexQuery.str(); - - if (!updateTableFilesToIndexQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE TO INDEX", - updateTableFilesToIndexQuery.error()); - } - - ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES TO INDEX", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::UpdateTableFiles(TableFilesSchema& files) { - try { - server::MetricCollector metric; - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query updateTableFilesQuery = connectionPtr->query(); - - std::map has_tables; - for (auto& file_schema : files) { - if (has_tables.find(file_schema.table_id_) != has_tables.end()) { - continue; - } - - updateTableFilesQuery << "SELECT EXISTS " - << "(SELECT 1 FROM " << META_TABLES << " " - << "WHERE table_id = " << mysqlpp::quote << file_schema.table_id_ << " " - << "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ") " - << "AS " << mysqlpp::quote << "check" - << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); - - mysqlpp::StoreQueryResult res = updateTableFilesQuery.store(); - - int check = res[0]["check"]; - has_tables[file_schema.table_id_] = (check == 1); - } - - for (auto& file_schema : files) { - if (!has_tables[file_schema.table_id_]) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); - - std::string id = std::to_string(file_schema.id_); - std::string table_id = file_schema.table_id_; - std::string engine_type = std::to_string(file_schema.engine_type_); - std::string file_id = file_schema.file_id_; - std::string file_type = std::to_string(file_schema.file_type_); - std::string file_size = std::to_string(file_schema.file_size_); - std::string row_count = std::to_string(file_schema.row_count_); - std::string updated_time = std::to_string(file_schema.updated_time_); - std::string created_on = std::to_string(file_schema.created_on_); - std::string date = std::to_string(file_schema.date_); - - updateTableFilesQuery << "UPDATE " << META_TABLEFILES << " " - << "SET table_id = " << mysqlpp::quote << table_id << ", " - << "engine_type = " << engine_type << ", " - << "file_id = " << mysqlpp::quote << file_id << ", " - << "file_type = " << file_type << ", " - << "file_size = " << file_size << ", 
" - << "row_count = " << row_count << ", " - << "updated_time = " << updated_time << ", " - << "created_on = " << created_on << ", " - << "date = " << date << " " - << "WHERE id = " << id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str(); - - if (!updateTableFilesQuery.exec()) { - return HandleException("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error()); - } - } - } // Scoped Connection - - ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN UPDATING TABLE FILES", e.what()); - } - - return Status::OK(); -} - -Status -MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { - auto now = utils::GetMicroSecTimeStamp(); - std::set table_ids; - - // remove to_delete files - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); - cleanUpFilesWithTTLQuery << "SELECT id, table_id, file_id, date FROM " << META_TABLEFILES << " " - << "WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " AND " - << "updated_time < " << std::to_string(now - seconds * US_PS) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - - TableFileSchema table_file; - std::vector idsToDelete; - - for (auto& resRow : res) { - table_file.id_ = resRow["id"]; // implicit conversion - - std::string table_id; - resRow["table_id"].to_string(table_id); - table_file.table_id_ = table_id; - - std::string file_id; - resRow["file_id"].to_string(file_id); - table_file.file_id_ = file_id; - - table_file.date_ = resRow["date"]; - - utils::DeleteTableFilePath(options_, table_file); - - ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_; - - idsToDelete.emplace_back(std::to_string(table_file.id_)); - - table_ids.insert(table_file.table_id_); - } - - if (!idsToDelete.empty()) { - std::stringstream idsToDeleteSS; - for (auto& id : idsToDelete) { - idsToDeleteSS << "id = " << id << " OR "; - } - - std::string idsToDeleteStr = idsToDeleteSS.str(); - idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " - cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLEFILES << " " - << "WHERE " << idsToDeleteStr << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - if (!cleanUpFilesWithTTLQuery.exec()) { - return HandleException("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", - cleanUpFilesWithTTLQuery.error()); - } - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Clean " << res.size() << " files deleted in " << seconds << " seconds"; - } - } // Scoped Connection - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", e.what()); - } - - // remove to_delete tables - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); - cleanUpFilesWithTTLQuery << "SELECT id, table_id FROM " << 
META_TABLES << " " - << "WHERE state = " << std::to_string(TableSchema::TO_DELETE) << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - - if (!res.empty()) { - std::stringstream idsToDeleteSS; - for (auto& resRow : res) { - size_t id = resRow["id"]; - std::string table_id; - resRow["table_id"].to_string(table_id); - - utils::DeleteTablePath(options_, table_id, false); // only delete empty folder - - idsToDeleteSS << "id = " << std::to_string(id) << " OR "; - } - std::string idsToDeleteStr = idsToDeleteSS.str(); - idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " - cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLES << " " - << "WHERE " << idsToDeleteStr << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - if (!cleanUpFilesWithTTLQuery.exec()) { - return HandleException("QUERY ERROR WHEN CLEANING UP TABLES WITH TTL", - cleanUpFilesWithTTLQuery.error()); - } - } - - if (res.size() > 0) { - ENGINE_LOG_DEBUG << "Remove " << res.size() << " tables from meta"; - } - } // Scoped Connection - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); - } - - // remove deleted table folder - // don't remove table folder until all its files has been deleted - try { - server::MetricCollector metric; - - { - mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); - - if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); - } - - for (auto& table_id : table_ids) { - mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); - cleanUpFilesWithTTLQuery << "SELECT file_id FROM " << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << ";"; - - ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); - - mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); - - if (res.empty()) { - utils::DeleteTablePath(options_, table_id); - } - } - - if (table_ids.size() > 0) { - ENGINE_LOG_DEBUG << "Remove " << table_ids.size() << " tables folder"; - } - } - } catch (std::exception& e) { - return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); - } - - return Status::OK(); -} - Status MySQLMetaImpl::CleanUp() { try { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query cleanUpQuery = connectionPtr->query(); - cleanUpQuery << "SELECT table_name " - << "FROM information_schema.tables " - << "WHERE table_schema = " << mysqlpp::quote << mysql_connection_pool_->getDB() << " " - << "AND table_name = " << mysqlpp::quote << META_TABLEFILES << ";"; + cleanUpQuery << "SELECT table_name" + << " FROM information_schema.tables" + << " WHERE table_schema = " << mysqlpp::quote << mysql_connection_pool_->getDB() + << " AND table_name = " << mysqlpp::quote << META_TABLEFILES << ";"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUp: " << cleanUpQuery.str(); @@ -1926,6 +1720,164 @@ MySQLMetaImpl::CleanUp() { return Status::OK(); } +Status +MySQLMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { + auto now = utils::GetMicroSecTimeStamp(); + std::set table_ids; + + // remove 
to_delete files + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); + cleanUpFilesWithTTLQuery << "SELECT id, table_id, file_id, date" + << " FROM " << META_TABLEFILES + << " WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) + << " AND updated_time < " << std::to_string(now - seconds * US_PS) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); + + TableFileSchema table_file; + std::vector idsToDelete; + + for (auto& resRow : res) { + table_file.id_ = resRow["id"]; // implicit conversion + resRow["table_id"].to_string(table_file.table_id_); + resRow["file_id"].to_string(table_file.file_id_); + table_file.date_ = resRow["date"]; + + utils::DeleteTableFilePath(options_, table_file); + + ENGINE_LOG_DEBUG << "Removing file id:" << table_file.id_ << " location:" << table_file.location_; + + idsToDelete.emplace_back(std::to_string(table_file.id_)); + table_ids.insert(table_file.table_id_); + } + + if (!idsToDelete.empty()) { + std::stringstream idsToDeleteSS; + for (auto& id : idsToDelete) { + idsToDeleteSS << "id = " << id << " OR "; + } + + std::string idsToDeleteStr = idsToDeleteSS.str(); + idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " + cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLEFILES << " WHERE " << idsToDeleteStr << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + if (!cleanUpFilesWithTTLQuery.exec()) { + return HandleException("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", + cleanUpFilesWithTTLQuery.error()); + } + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Clean " << res.size() << " files deleted in " << seconds << " seconds"; + } + } // Scoped Connection + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CLEANING UP FILES WITH TTL", e.what()); + } + + // remove to_delete tables + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); + cleanUpFilesWithTTLQuery << "SELECT id, table_id" + << " FROM " << META_TABLES + << " WHERE state = " << std::to_string(TableSchema::TO_DELETE) << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); + + if (!res.empty()) { + std::stringstream idsToDeleteSS; + for (auto& resRow : res) { + size_t id = resRow["id"]; + std::string table_id; + resRow["table_id"].to_string(table_id); + + utils::DeleteTablePath(options_, table_id, false); // only delete empty folder + + idsToDeleteSS << "id = " << std::to_string(id) << " OR "; + } + std::string idsToDeleteStr = idsToDeleteSS.str(); + idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR " + cleanUpFilesWithTTLQuery << "DELETE FROM " << META_TABLES << " WHERE " << idsToDeleteStr << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << 
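
The TTL pass compares updated_time against now - seconds * US_PS, all in microseconds (US_PS presumably being microseconds per second). The same cutoff arithmetic with std::chrono, widening before the multiply so a large TTL cannot overflow:

    #include <chrono>
    #include <cstdint>

    // Microsecond wall-clock timestamp, in the spirit of utils::GetMicroSecTimeStamp().
    static int64_t NowMicros() {
        using namespace std::chrono;
        return duration_cast<microseconds>(system_clock::now().time_since_epoch()).count();
    }

    // Rows with updated_time older than this are eligible for physical deletion.
    static int64_t TtlCutoffMicros(uint16_t ttl_seconds) {
        constexpr int64_t kMicrosPerSecond = 1000000;  // the code's US_PS
        return NowMicros() - static_cast<int64_t>(ttl_seconds) * kMicrosPerSecond;
    }
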
cleanUpFilesWithTTLQuery.str(); + + if (!cleanUpFilesWithTTLQuery.exec()) { + return HandleException("QUERY ERROR WHEN CLEANING UP TABLES WITH TTL", + cleanUpFilesWithTTLQuery.error()); + } + } + + if (res.size() > 0) { + ENGINE_LOG_DEBUG << "Remove " << res.size() << " tables from meta"; + } + } // Scoped Connection + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); + } + + // remove deleted table folder + // don't remove table folder until all its files has been deleted + try { + server::MetricCollector metric; + + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); + } + + for (auto& table_id : table_ids) { + mysqlpp::Query cleanUpFilesWithTTLQuery = connectionPtr->query(); + cleanUpFilesWithTTLQuery << "SELECT file_id" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote + << table_id << ";"; + + ENGINE_LOG_DEBUG << "MySQLMetaImpl::CleanUpFilesWithTTL: " << cleanUpFilesWithTTLQuery.str(); + + mysqlpp::StoreQueryResult res = cleanUpFilesWithTTLQuery.store(); + + if (res.empty()) { + utils::DeleteTablePath(options_, table_id); + } + } + + if (table_ids.size() > 0) { + ENGINE_LOG_DEBUG << "Remove " << table_ids.size() << " tables folder"; + } + } + } catch (std::exception& e) { + return HandleException("GENERAL ERROR WHEN CLEANING UP TABLES WITH TTL", e.what()); + } + + return Status::OK(); +} + Status MySQLMetaImpl::Count(const std::string& table_id, uint64_t& result) { try { @@ -1944,15 +1896,15 @@ MySQLMetaImpl::Count(const std::string& table_id, uint64_t& result) { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query countQuery = connectionPtr->query(); - countQuery << "SELECT row_count FROM " << META_TABLEFILES << " " - << "WHERE table_id = " << mysqlpp::quote << table_id << " AND " - << "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " - << "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " - << "file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; + countQuery << "SELECT row_count" + << " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << table_id + << " AND (file_type = " << std::to_string(TableFileSchema::RAW) + << " OR file_type = " << std::to_string(TableFileSchema::TO_INDEX) + << " OR file_type = " << std::to_string(TableFileSchema::INDEX) << ");"; ENGINE_LOG_DEBUG << "MySQLMetaImpl::Count: " << countQuery.str(); @@ -1978,7 +1930,7 @@ MySQLMetaImpl::DropAll() { mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); if (connectionPtr == nullptr) { - return Status(DB_ERROR, "Failed to connect to database server"); + return Status(DB_ERROR, "Failed to connect to meta server(mysql)"); } mysqlpp::Query dropTableQuery = connectionPtr->query(); @@ -1995,6 +1947,72 @@ MySQLMetaImpl::DropAll() { } } +Status +MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) { + if (to_discard_size <= 0) { + return Status::OK(); + } + ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; + + try { + server::MetricCollector metric; + bool status; + { + mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_); + + if (connectionPtr == nullptr) { + return 
Status(DB_ERROR, "Failed to connect to meta server(mysql)");
+            }
+
+            mysqlpp::Query discardFilesQuery = connectionPtr->query();
+            discardFilesQuery << "SELECT id, file_size"
+                              << " FROM " << META_TABLEFILES << " WHERE file_type <> "
+                              << std::to_string(TableFileSchema::TO_DELETE) << " ORDER BY id ASC"
+                              << " LIMIT 10;";
+
+            ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str();
+
+            mysqlpp::StoreQueryResult res = discardFilesQuery.store();
+            if (res.num_rows() == 0) {
+                return Status::OK();
+            }
+
+            TableFileSchema table_file;
+            std::stringstream idsToDiscardSS;
+            for (auto& resRow : res) {
+                if (to_discard_size <= 0) {
+                    break;
+                }
+                table_file.id_ = resRow["id"];
+                table_file.file_size_ = resRow["file_size"];
+                idsToDiscardSS << "id = " << std::to_string(table_file.id_) << " OR ";
+                ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.id_
+                                 << " table_file.size=" << table_file.file_size_;
+                to_discard_size -= table_file.file_size_;
+            }
+
+            std::string idsToDiscardStr = idsToDiscardSS.str();
+            idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4);  // remove the last " OR "
+
+            discardFilesQuery << "UPDATE " << META_TABLEFILES
+                              << " SET file_type = " << std::to_string(TableFileSchema::TO_DELETE)
+                              << ", updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " WHERE "
+                              << idsToDiscardStr << ";";
+
+            ENGINE_LOG_DEBUG << "MySQLMetaImpl::DiscardFiles: " << discardFilesQuery.str();
+
+            status = discardFilesQuery.exec();
+            if (!status) {
+                return HandleException("QUERY ERROR WHEN DISCARDING FILES", discardFilesQuery.error());
+            }
+        }  // Scoped Connection
+
+        return DiscardFiles(to_discard_size);
+    } catch (std::exception& e) {
+        return HandleException("GENERAL ERROR WHEN DISCARDING FILES", e.what());
+    }
+}
+
 }  // namespace meta
 }  // namespace engine
 }  // namespace milvus
diff --git a/core/src/db/meta/MySQLMetaImpl.h b/core/src/db/meta/MySQLMetaImpl.h
index bb7fb5b59f..00b7627548 100644
--- a/core/src/db/meta/MySQLMetaImpl.h
+++ b/core/src/db/meta/MySQLMetaImpl.h
@@ -49,7 +49,7 @@ class MySQLMetaImpl : public Meta {
     AllTables(std::vector<TableSchema>& table_schema_array) override;
 
     Status
-    DeleteTable(const std::string& table_id) override;
+    DropTable(const std::string& table_id) override;
 
     Status
     DeleteTableFiles(const std::string& table_id) override;
@@ -58,27 +58,17 @@ class MySQLMetaImpl : public Meta {
     CreateTableFile(TableFileSchema& file_schema) override;
 
     Status
-    DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
+    DropDataByDate(const std::string& table_id, const DatesT& dates) override;
 
     Status
     GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
 
-    Status
-    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
-                std::vector<std::string>& file_ids) override;
-
     Status
     UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;
 
     Status
     UpdateTableFlag(const std::string& table_id, int64_t flag) override;
 
-    Status
-    DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
-
-    Status
-    DropTableIndex(const std::string& table_id) override;
-
     Status
     UpdateTableFile(TableFileSchema& file_schema) override;
 
@@ -88,6 +78,24 @@ class MySQLMetaImpl : public Meta {
     Status
     UpdateTableFiles(TableFilesSchema& files) override;
 
+    Status
+    DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
+
+    Status
+    DropTableIndex(const std::string& table_id) override;
+
+    Status
+    CreatePartition(const 
std::string& table_id, const std::string& partition_name, const std::string& tag) override;
+
+    Status
+    DropPartition(const std::string& partition_name) override;
+
+    Status
+    ShowPartitions(const std::string& table_id, std::vector<TableSchema>& partition_schema_array) override;
+
+    Status
+    GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;
+
     Status
     FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
                   DatePartionedTableFilesSchema& files) override;
@@ -98,6 +106,10 @@ class MySQLMetaImpl : public Meta {
     Status
     FilesToIndex(TableFilesSchema&) override;
 
+    Status
+    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
+                std::vector<std::string>& file_ids) override;
+
     Status
     Archive() override;
diff --git a/core/src/db/meta/SqliteMetaImpl.cpp b/core/src/db/meta/SqliteMetaImpl.cpp
index cf2a8d77cf..6221dd8ac1 100644
--- a/core/src/db/meta/SqliteMetaImpl.cpp
+++ b/core/src/db/meta/SqliteMetaImpl.cpp
@@ -57,26 +57,33 @@ HandleException(const std::string& desc, const char* what = nullptr) {
 }  // namespace
 
 inline auto
-StoragePrototype(const std::string& path) {
-    return make_storage(
-        path,
-        make_table(META_TABLES, make_column("id", &TableSchema::id_, primary_key()),
-                   make_column("table_id", &TableSchema::table_id_, unique()),
-                   make_column("state", &TableSchema::state_), make_column("dimension", &TableSchema::dimension_),
-                   make_column("created_on", &TableSchema::created_on_),
-                   make_column("flag", &TableSchema::flag_, default_value(0)),
-                   make_column("index_file_size", &TableSchema::index_file_size_),
-                   make_column("engine_type", &TableSchema::engine_type_), make_column("nlist", &TableSchema::nlist_),
-                   make_column("metric_type", &TableSchema::metric_type_)),
-        make_table(
-            META_TABLEFILES, make_column("id", &TableFileSchema::id_, primary_key()),
-            make_column("table_id", &TableFileSchema::table_id_),
-            make_column("engine_type", &TableFileSchema::engine_type_),
-            make_column("file_id", &TableFileSchema::file_id_), make_column("file_type", &TableFileSchema::file_type_),
-            make_column("file_size", &TableFileSchema::file_size_, default_value(0)),
-            make_column("row_count", &TableFileSchema::row_count_, default_value(0)),
-            make_column("updated_time", &TableFileSchema::updated_time_),
-            make_column("created_on", &TableFileSchema::created_on_), make_column("date", &TableFileSchema::date_)));
+StoragePrototype(const std::string &path) {
+    return make_storage(path,
+                        make_table(META_TABLES,
+                                   make_column("id", &TableSchema::id_, primary_key()),
+                                   make_column("table_id", &TableSchema::table_id_, unique()),
+                                   make_column("state", &TableSchema::state_),
+                                   make_column("dimension", &TableSchema::dimension_),
+                                   make_column("created_on", &TableSchema::created_on_),
+                                   make_column("flag", &TableSchema::flag_, default_value(0)),
+                                   make_column("index_file_size", &TableSchema::index_file_size_),
+                                   make_column("engine_type", &TableSchema::engine_type_),
+                                   make_column("nlist", &TableSchema::nlist_),
+                                   make_column("metric_type", &TableSchema::metric_type_),
+                                   make_column("owner_table", &TableSchema::owner_table_, default_value("")),
+                                   make_column("partition_tag", &TableSchema::partition_tag_, default_value("")),
+                                   make_column("version", &TableSchema::version_, default_value(CURRENT_VERSION))),
+                        make_table(META_TABLEFILES,
+                                   make_column("id", &TableFileSchema::id_, primary_key()),
+                                   make_column("table_id", &TableFileSchema::table_id_),
+                                   make_column("engine_type", &TableFileSchema::engine_type_),
+                                   make_column("file_id", 
&TableFileSchema::file_id_), + make_column("file_type", &TableFileSchema::file_type_), + make_column("file_size", &TableFileSchema::file_size_, default_value(0)), + make_column("row_count", &TableFileSchema::row_count_, default_value(0)), + make_column("updated_time", &TableFileSchema::updated_time_), + make_column("created_on", &TableFileSchema::created_on_), + make_column("date", &TableFileSchema::date_))); } using ConnectorT = decltype(StoragePrototype("")); @@ -151,9 +158,247 @@ SqliteMetaImpl::Initialize() { return Status::OK(); } +Status +SqliteMetaImpl::CreateTable(TableSchema &table_schema) { + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + if (table_schema.table_id_ == "") { + NextTableId(table_schema.table_id_); + } else { + auto table = ConnectorPtr->select(columns(&TableSchema::state_), + where(c(&TableSchema::table_id_) == table_schema.table_id_)); + if (table.size() == 1) { + if (TableSchema::TO_DELETE == std::get<0>(table[0])) { + return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second"); + } else { + // Change from no error to already exist. + return Status(DB_ALREADY_EXIST, "Table already exists"); + } + } + } + + table_schema.id_ = -1; + table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + + try { + auto id = ConnectorPtr->insert(table_schema); + table_schema.id_ = id; + } catch (std::exception &e) { + return HandleException("Encounter exception when create table", e.what()); + } + + ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; + + return utils::CreateTablePath(options_, table_schema.table_id_); + } catch (std::exception &e) { + return HandleException("Encounter exception when create table", e.what()); + } +} + +Status +SqliteMetaImpl::DescribeTable(TableSchema &table_schema) { + try { + server::MetricCollector metric; + + auto groups = ConnectorPtr->select(columns(&TableSchema::id_, + &TableSchema::state_, + &TableSchema::dimension_, + &TableSchema::created_on_, + &TableSchema::flag_, + &TableSchema::index_file_size_, + &TableSchema::engine_type_, + &TableSchema::nlist_, + &TableSchema::metric_type_, + &TableSchema::owner_table_, + &TableSchema::partition_tag_, + &TableSchema::version_), + where(c(&TableSchema::table_id_) == table_schema.table_id_ + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + + if (groups.size() == 1) { + table_schema.id_ = std::get<0>(groups[0]); + table_schema.state_ = std::get<1>(groups[0]); + table_schema.dimension_ = std::get<2>(groups[0]); + table_schema.created_on_ = std::get<3>(groups[0]); + table_schema.flag_ = std::get<4>(groups[0]); + table_schema.index_file_size_ = std::get<5>(groups[0]); + table_schema.engine_type_ = std::get<6>(groups[0]); + table_schema.nlist_ = std::get<7>(groups[0]); + table_schema.metric_type_ = std::get<8>(groups[0]); + table_schema.owner_table_ = std::get<9>(groups[0]); + table_schema.partition_tag_ = std::get<10>(groups[0]); + table_schema.version_ = std::get<11>(groups[0]); + } else { + return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); + } + } catch (std::exception &e) { + return HandleException("Encounter exception when describe table", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) { + has_or_not = false; + + try { + server::MetricCollector metric; + auto tables = 
ConnectorPtr->select(columns(&TableSchema::id_), + where(c(&TableSchema::table_id_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + if (tables.size() == 1) { + has_or_not = true; + } else { + has_or_not = false; + } + } catch (std::exception &e) { + return HandleException("Encounter exception when lookup table", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::AllTables(std::vector &table_schema_array) { + try { + server::MetricCollector metric; + + auto selected = ConnectorPtr->select(columns(&TableSchema::id_, + &TableSchema::table_id_, + &TableSchema::dimension_, + &TableSchema::created_on_, + &TableSchema::flag_, + &TableSchema::index_file_size_, + &TableSchema::engine_type_, + &TableSchema::nlist_, + &TableSchema::metric_type_, + &TableSchema::owner_table_, + &TableSchema::partition_tag_, + &TableSchema::version_), + where(c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + for (auto &table : selected) { + TableSchema schema; + schema.id_ = std::get<0>(table); + schema.table_id_ = std::get<1>(table); + schema.dimension_ = std::get<2>(table); + schema.created_on_ = std::get<3>(table); + schema.flag_ = std::get<4>(table); + schema.index_file_size_ = std::get<5>(table); + schema.engine_type_ = std::get<6>(table); + schema.nlist_ = std::get<7>(table); + schema.metric_type_ = std::get<8>(table); + schema.owner_table_ = std::get<9>(table); + schema.partition_tag_ = std::get<10>(table); + schema.version_ = std::get<11>(table); + + table_schema_array.emplace_back(schema); + } + } catch (std::exception &e) { + return HandleException("Encounter exception when lookup all tables", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::DropTable(const std::string &table_id) { + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + //soft delete table + ConnectorPtr->update_all( + set( + c(&TableSchema::state_) = (int) TableSchema::TO_DELETE), + where( + c(&TableSchema::table_id_) == table_id and + c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + + ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; + } catch (std::exception &e) { + return HandleException("Encounter exception when delete table", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::DeleteTableFiles(const std::string &table_id) { + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + //soft delete table files + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + + ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; + } catch (std::exception &e) { + return HandleException("Encounter exception when delete table files", e.what()); + } + + return Status::OK(); +} + +Status +SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) { + if (file_schema.date_ == EmptyDate) { + file_schema.date_ = utils::GetDate(); + } + TableSchema table_schema; + table_schema.table_id_ = file_schema.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + 
}
+
+    try {
+        server::MetricCollector metric;
+
+        NextFileId(file_schema.file_id_);
+        file_schema.dimension_ = table_schema.dimension_;
+        file_schema.file_size_ = 0;
+        file_schema.row_count_ = 0;
+        file_schema.created_on_ = utils::GetMicroSecTimeStamp();
+        file_schema.updated_time_ = file_schema.created_on_;
+        file_schema.index_file_size_ = table_schema.index_file_size_;
+        file_schema.engine_type_ = table_schema.engine_type_;
+        file_schema.nlist_ = table_schema.nlist_;
+        file_schema.metric_type_ = table_schema.metric_type_;
+
+        //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
+        std::lock_guard<std::mutex> meta_lock(meta_mutex_);
+
+        auto id = ConnectorPtr->insert(file_schema);
+        file_schema.id_ = id;
+
+        ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_;
+        return utils::CreateTableFilePath(options_, file_schema);
+    } catch (std::exception &e) {
+        return HandleException("Encounter exception when create table file", e.what());
+    }
+
+    return Status::OK();
+}
+
 // TODO(myh): Delete single vector by id
 Status
-SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT& dates) {
+SqliteMetaImpl::DropDataByDate(const std::string &table_id,
+                               const DatesT &dates) {
     if (dates.empty()) {
         return Status::OK();
     }
@@ -193,8 +438,8 @@ SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT&
                                      where(c(&TableFileSchema::table_id_) == table_id and
                                            in(&TableFileSchema::date_, batch_dates)));
         }
-        ENGINE_LOG_DEBUG << "Successfully drop partitions, table id = " << table_schema.table_id_;
-    } catch (std::exception& e) {
+        ENGINE_LOG_DEBUG << "Successfully drop data by date, table id = " << table_schema.table_id_;
+    } catch (std::exception &e) {
         return HandleException("Encounter exception when drop data by date", e.what());
     }
@@ -202,173 +447,149 @@ SqliteMetaImpl::DropPartitionsByDates(const std::string& table_id, const DatesT&
 }

 Status
-SqliteMetaImpl::CreateTable(TableSchema& table_schema) {
+SqliteMetaImpl::GetTableFiles(const std::string &table_id,
+                              const std::vector<size_t> &ids,
+                              TableFilesSchema &table_files) {
     try {
-        server::MetricCollector metric;
-
-        // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
-        std::lock_guard<std::mutex> meta_lock(meta_mutex_);
-
-        if (table_schema.table_id_ == "") {
-            NextTableId(table_schema.table_id_);
-        } else {
-            auto table = ConnectorPtr->select(columns(&TableSchema::state_),
-                                              where(c(&TableSchema::table_id_) == table_schema.table_id_));
-            if (table.size() == 1) {
-                if (TableSchema::TO_DELETE == std::get<0>(table[0])) {
-                    return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second");
-                } else {
-                    // Change from no error to already exist.
- return Status(DB_ALREADY_EXIST, "Table already exists"); - } - } + table_files.clear(); + auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::file_id_, + &TableFileSchema::file_type_, + &TableFileSchema::file_size_, + &TableFileSchema::row_count_, + &TableFileSchema::date_, + &TableFileSchema::engine_type_, + &TableFileSchema::created_on_), + where(c(&TableFileSchema::table_id_) == table_id and + in(&TableFileSchema::id_, ids) and + c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + TableSchema table_schema; + table_schema.table_id_ = table_id; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; } - table_schema.id_ = -1; - table_schema.created_on_ = utils::GetMicroSecTimeStamp(); + Status result; + for (auto &file : files) { + TableFileSchema file_schema; + file_schema.table_id_ = table_id; + file_schema.id_ = std::get<0>(file); + file_schema.file_id_ = std::get<1>(file); + file_schema.file_type_ = std::get<2>(file); + file_schema.file_size_ = std::get<3>(file); + file_schema.row_count_ = std::get<4>(file); + file_schema.date_ = std::get<5>(file); + file_schema.engine_type_ = std::get<6>(file); + file_schema.created_on_ = std::get<7>(file); + file_schema.dimension_ = table_schema.dimension_; + file_schema.index_file_size_ = table_schema.index_file_size_; + file_schema.nlist_ = table_schema.nlist_; + file_schema.metric_type_ = table_schema.metric_type_; - try { - auto id = ConnectorPtr->insert(table_schema); - table_schema.id_ = id; - } catch (std::exception& e) { - return HandleException("Encounter exception when create table", e.what()); + utils::GetTableFilePath(options_, file_schema); + + table_files.emplace_back(file_schema); } - ENGINE_LOG_DEBUG << "Successfully create table: " << table_schema.table_id_; - - return utils::CreateTablePath(options_, table_schema.table_id_); - } catch (std::exception& e) { - return HandleException("Encounter exception when create table", e.what()); + ENGINE_LOG_DEBUG << "Get table files by id"; + return result; + } catch (std::exception &e) { + return HandleException("Encounter exception when lookup table files", e.what()); } } Status -SqliteMetaImpl::DeleteTable(const std::string& table_id) { +SqliteMetaImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) { try { server::MetricCollector metric; - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - // soft delete table + //set all backup file to raw ConnectorPtr->update_all( - set(c(&TableSchema::state_) = (int)TableSchema::TO_DELETE), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - - ENGINE_LOG_DEBUG << "Successfully delete table, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("Encounter exception when delete table", e.what()); + set( + c(&TableSchema::flag_) = flag), + where( + c(&TableSchema::table_id_) == table_id)); + ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; + } catch (std::exception &e) { + std::string msg = "Encounter exception when update table flag: table_id = " + table_id; + return HandleException(msg, e.what()); } return Status::OK(); } Status -SqliteMetaImpl::DeleteTableFiles(const std::string& table_id) { +SqliteMetaImpl::UpdateTableFile(TableFileSchema &file_schema) { + file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); try { server::MetricCollector metric; // 
multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - // soft delete table files - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + auto tables = ConnectorPtr->select(columns(&TableSchema::state_), + where(c(&TableSchema::table_id_) == file_schema.table_id_)); - ENGINE_LOG_DEBUG << "Successfully delete table files, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("Encounter exception when delete table files", e.what()); + //if the table has been deleted, just mark the table file as TO_DELETE + //clean thread will delete the file later + if (tables.size() < 1 || std::get<0>(tables[0]) == (int) TableSchema::TO_DELETE) { + file_schema.file_type_ = TableFileSchema::TO_DELETE; + } + + ConnectorPtr->update(file_schema); + + ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; + } catch (std::exception &e) { + std::string msg = "Exception update table file: table_id = " + file_schema.table_id_ + + " file_id = " + file_schema.file_id_; + return HandleException(msg, e.what()); } - return Status::OK(); } Status -SqliteMetaImpl::DescribeTable(TableSchema& table_schema) { +SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) { try { server::MetricCollector metric; - auto groups = - ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, - &TableSchema::created_on_, &TableSchema::flag_, &TableSchema::index_file_size_, - &TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_schema.table_id_ and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); - if (groups.size() == 1) { - table_schema.id_ = std::get<0>(groups[0]); - table_schema.state_ = std::get<1>(groups[0]); - table_schema.dimension_ = std::get<2>(groups[0]); - table_schema.created_on_ = std::get<3>(groups[0]); - table_schema.flag_ = std::get<4>(groups[0]); - table_schema.index_file_size_ = std::get<5>(groups[0]); - table_schema.engine_type_ = std::get<6>(groups[0]); - table_schema.nlist_ = std::get<7>(groups[0]); - table_schema.metric_type_ = std::get<8>(groups[0]); - } else { - return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found"); - } - } catch (std::exception& e) { - return HandleException("Encounter exception when describe table", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::FilesByType(const std::string& table_id, const std::vector& file_types, - std::vector& file_ids) { - if (file_types.empty()) { - return Status(DB_ERROR, "file types array is empty"); - } - - try { - file_ids.clear(); - auto selected = ConnectorPtr->select( - columns(&TableFileSchema::file_id_, &TableFileSchema::file_type_), - where(in(&TableFileSchema::file_type_, file_types) and c(&TableFileSchema::table_id_) == table_id)); - - if (selected.size() >= 1) { - int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0; - int to_index_count = 0, index_count = 0, backup_count = 0; - for (auto& file : selected) { - file_ids.push_back(std::get<0>(file)); - switch 
(std::get<1>(file)) { - case (int)TableFileSchema::RAW: - raw_count++; - break; - case (int)TableFileSchema::NEW: - new_count++; - break; - case (int)TableFileSchema::NEW_MERGE: - new_merge_count++; - break; - case (int)TableFileSchema::NEW_INDEX: - new_index_count++; - break; - case (int)TableFileSchema::TO_INDEX: - to_index_count++; - break; - case (int)TableFileSchema::INDEX: - index_count++; - break; - case (int)TableFileSchema::BACKUP: - backup_count++; - break; - default: - break; - } + std::map has_tables; + for (auto &file : files) { + if (has_tables.find(file.table_id_) != has_tables.end()) { + continue; + } + auto tables = ConnectorPtr->select(columns(&TableSchema::id_), + where(c(&TableSchema::table_id_) == file.table_id_ + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); + if (tables.size() >= 1) { + has_tables[file.table_id_] = true; + } else { + has_tables[file.table_id_] = false; } - - ENGINE_LOG_DEBUG << "Table " << table_id << " currently has raw files:" << raw_count - << " new files:" << new_count << " new_merge files:" << new_merge_count - << " new_index files:" << new_index_count << " to_index files:" << to_index_count - << " index files:" << index_count << " backup files:" << backup_count; } - } catch (std::exception& e) { - return HandleException("Encounter exception when check non index files", e.what()); + + auto commited = ConnectorPtr->transaction([&]() mutable { + for (auto &file : files) { + if (!has_tables[file.table_id_]) { + file.file_type_ = TableFileSchema::TO_DELETE; + } + + file.updated_time_ = utils::GetMicroSecTimeStamp(); + ConnectorPtr->update(file); + } + return true; + }); + + if (!commited) { + return HandleException("UpdateTableFiles error: sqlite transaction failed"); + } + + ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; + } catch (std::exception &e) { + return HandleException("Encounter exception when update table files", e.what()); } return Status::OK(); } @@ -381,10 +602,17 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto tables = ConnectorPtr->select( - columns(&TableSchema::id_, &TableSchema::state_, &TableSchema::dimension_, &TableSchema::created_on_, - &TableSchema::flag_, &TableSchema::index_file_size_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + auto tables = ConnectorPtr->select(columns(&TableSchema::id_, + &TableSchema::state_, + &TableSchema::dimension_, + &TableSchema::created_on_, + &TableSchema::flag_, + &TableSchema::index_file_size_, + &TableSchema::owner_table_, + &TableSchema::partition_tag_, + &TableSchema::version_), + where(c(&TableSchema::table_id_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); if (tables.size() > 0) { meta::TableSchema table_schema; @@ -395,6 +623,9 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& table_schema.created_on_ = std::get<3>(tables[0]); table_schema.flag_ = std::get<4>(tables[0]); table_schema.index_file_size_ = std::get<5>(tables[0]); + table_schema.owner_table_ = std::get<6>(tables[0]); + table_schema.partition_tag_ = std::get<7>(tables[0]); + table_schema.version_ = std::get<8>(tables[0]); table_schema.engine_type_ = index.engine_type_; table_schema.nlist_ = index.nlist_; table_schema.metric_type_ = index.metric_type_; @@ -404,11 +635,14 @@ 
SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& return Status(DB_NOT_FOUND, "Table " + table_id + " not found"); } - // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); + //set all backup file to raw + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::RAW, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::BACKUP)); ENGINE_LOG_DEBUG << "Successfully update table index, table id = " << table_id; } catch (std::exception& e) { @@ -420,16 +654,23 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& table_id, const TableIndex& } Status -SqliteMetaImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) { +SqliteMetaImpl::UpdateTableFilesToIndex(const std::string &table_id) { try { server::MetricCollector metric; - // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableSchema::flag_) = flag), where(c(&TableSchema::table_id_) == table_id)); - ENGINE_LOG_DEBUG << "Successfully update table flag, table id = " << table_id; - } catch (std::exception& e) { - std::string msg = "Encounter exception when update table flag: table_id = " + table_id; - return HandleException(msg, e.what()); + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_INDEX), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW)); + + ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; + } catch (std::exception &e) { + return HandleException("Encounter exception when update table files to to_index", e.what()); } return Status::OK(); @@ -440,9 +681,11 @@ SqliteMetaImpl::DescribeTableIndex(const std::string& table_id, TableIndex& inde try { server::MetricCollector metric; - auto groups = ConnectorPtr->select( - columns(&TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); + auto groups = ConnectorPtr->select(columns(&TableSchema::engine_type_, + &TableSchema::nlist_, + &TableSchema::metric_type_), + where(c(&TableSchema::table_id_) == table_id + and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE)); if (groups.size() == 1) { index.engine_type_ = std::get<0>(groups[0]); @@ -466,26 +709,35 @@ SqliteMetaImpl::DropTableIndex(const std::string& table_id) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - // soft delete index files - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::INDEX)); - - // set all backup file to raw - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::RAW, - 
c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::BACKUP)); - - // set table index type to raw + //soft delete index files ConnectorPtr->update_all( - set(c(&TableSchema::engine_type_) = DEFAULT_ENGINE_TYPE, c(&TableSchema::nlist_) = DEFAULT_NLIST, + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::INDEX)); + + //set all backup file to raw + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::RAW, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + c(&TableFileSchema::table_id_) == table_id and + c(&TableFileSchema::file_type_) == (int) TableFileSchema::BACKUP)); + + //set table index type to raw + ConnectorPtr->update_all( + set( + c(&TableSchema::engine_type_) = DEFAULT_ENGINE_TYPE, + c(&TableSchema::nlist_) = DEFAULT_NLIST, c(&TableSchema::metric_type_) = DEFAULT_METRIC_TYPE), - where(c(&TableSchema::table_id_) == table_id)); + where( + c(&TableSchema::table_id_) == table_id)); ENGINE_LOG_DEBUG << "Successfully drop table index, table id = " << table_id; - } catch (std::exception& e) { + } catch (std::exception &e) { return HandleException("Encounter exception when delete table index files", e.what()); } @@ -493,158 +745,94 @@ SqliteMetaImpl::DropTableIndex(const std::string& table_id) { } Status -SqliteMetaImpl::HasTable(const std::string& table_id, bool& has_or_not) { - has_or_not = false; +SqliteMetaImpl::CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) { + server::MetricCollector metric; - try { - server::MetricCollector metric; - auto tables = ConnectorPtr->select( - columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == table_id and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - if (tables.size() == 1) { - has_or_not = true; - } else { - has_or_not = false; - } - } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::AllTables(std::vector& table_schema_array) { - try { - server::MetricCollector metric; - - auto selected = - ConnectorPtr->select(columns(&TableSchema::id_, &TableSchema::table_id_, &TableSchema::dimension_, - &TableSchema::created_on_, &TableSchema::flag_, &TableSchema::index_file_size_, - &TableSchema::engine_type_, &TableSchema::nlist_, &TableSchema::metric_type_), - where(c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - for (auto& table : selected) { - TableSchema schema; - schema.id_ = std::get<0>(table); - schema.table_id_ = std::get<1>(table); - schema.dimension_ = std::get<2>(table); - schema.created_on_ = std::get<3>(table); - schema.flag_ = std::get<4>(table); - schema.index_file_size_ = std::get<5>(table); - schema.engine_type_ = std::get<6>(table); - schema.nlist_ = std::get<7>(table); - schema.metric_type_ = std::get<8>(table); - - table_schema_array.emplace_back(schema); - } - } catch (std::exception& e) { - return HandleException("Encounter exception when lookup all tables", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::CreateTableFile(TableFileSchema& file_schema) { - if (file_schema.date_ == EmptyDate) { - 
file_schema.date_ = utils::GetDate();
-    }
     TableSchema table_schema;
-    table_schema.table_id_ = file_schema.table_id_;
+    table_schema.table_id_ = table_id;
     auto status = DescribeTable(table_schema);
     if (!status.ok()) {
         return status;
     }

+    // creating a partition under another partition is not allowed
+    if (!table_schema.owner_table_.empty()) {
+        return Status(DB_ERROR, "Nested partition is not allowed");
+    }
+
+    if (partition_name == "") {
+        // a duplicated partition tag is not allowed
+        std::string exist_partition;
+        GetPartitionName(table_id, tag, exist_partition);
+        if (!exist_partition.empty()) {
+            return Status(DB_ERROR, "Duplicated partition is not allowed");
+        }
+
+        NextTableId(table_schema.table_id_);
+    } else {
+        table_schema.table_id_ = partition_name;
+    }
+
+    table_schema.id_ = -1;
+    table_schema.flag_ = 0;
+    table_schema.created_on_ = utils::GetMicroSecTimeStamp();
+    table_schema.owner_table_ = table_id;
+    table_schema.partition_tag_ = tag;
+
+    return CreateTable(table_schema);
+}
+
+Status
+SqliteMetaImpl::DropPartition(const std::string& partition_name) {
+    return DropTable(partition_name);
+}
+
+Status
+SqliteMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) {
     try {
         server::MetricCollector metric;
-        NextFileId(file_schema.file_id_);
-        file_schema.dimension_ = table_schema.dimension_;
-        file_schema.file_size_ = 0;
-        file_schema.row_count_ = 0;
-        file_schema.created_on_ = utils::GetMicroSecTimeStamp();
-        file_schema.updated_time_ = file_schema.created_on_;
-        file_schema.index_file_size_ = table_schema.index_file_size_;
-        file_schema.engine_type_ = table_schema.engine_type_;
-        file_schema.nlist_ = table_schema.nlist_;
-        file_schema.metric_type_ = table_schema.metric_type_;
-
-        // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here
-        std::lock_guard<std::mutex> meta_lock(meta_mutex_);
-
-        auto id = ConnectorPtr->insert(file_schema);
-        file_schema.id_ = id;
-
-        ENGINE_LOG_DEBUG << "Successfully create table file, file id = " << file_schema.file_id_;
-        return utils::CreateTableFilePath(options_, file_schema);
-    } catch (std::exception& e) {
-        return HandleException("Encounter exception when create table file", e.what());
+        auto partitions = ConnectorPtr->select(columns(&TableSchema::table_id_),
+                                               where(c(&TableSchema::owner_table_) == table_id
+                                                     and c(&TableSchema::state_) != (int) TableSchema::TO_DELETE));
+        for (size_t i = 0; i < partitions.size(); i++) {
+            std::string partition_name = std::get<0>(partitions[i]);
+            meta::TableSchema partition_schema;
+            partition_schema.table_id_ = partition_name;
+            DescribeTable(partition_schema);
+            partiton_schema_array.emplace_back(partition_schema);
+        }
+    } catch (std::exception &e) {
+        return HandleException("Encounter exception when show partitions", e.what());
     }

     return Status::OK();
 }

 Status
-SqliteMetaImpl::FilesToIndex(TableFilesSchema& files) {
-    files.clear();
-
+SqliteMetaImpl::GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) {
     try {
         server::MetricCollector metric;
-        auto selected = ConnectorPtr->select(
-            columns(&TableFileSchema::id_, &TableFileSchema::table_id_, &TableFileSchema::file_id_,
-                    &TableFileSchema::file_type_, &TableFileSchema::file_size_, &TableFileSchema::row_count_,
-                    &TableFileSchema::date_, &TableFileSchema::engine_type_, &TableFileSchema::created_on_),
-            where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_INDEX));
-
-        std::map<std::string, TableSchema> groups;
-        TableFileSchema table_file;
-
-        Status ret;
-        for (auto& file : selected) {
-
table_file.id_ = std::get<0>(file); - table_file.table_id_ = std::get<1>(file); - table_file.file_id_ = std::get<2>(file); - table_file.file_type_ = std::get<3>(file); - table_file.file_size_ = std::get<4>(file); - table_file.row_count_ = std::get<5>(file); - table_file.date_ = std::get<6>(file); - table_file.engine_type_ = std::get<7>(file); - table_file.created_on_ = std::get<8>(file); - - auto status = utils::GetTableFilePath(options_, table_file); - if (!status.ok()) { - ret = status; - } - auto groupItr = groups.find(table_file.table_id_); - if (groupItr == groups.end()) { - TableSchema table_schema; - table_schema.table_id_ = table_file.table_id_; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; - } - groups[table_file.table_id_] = table_schema; - } - table_file.dimension_ = groups[table_file.table_id_].dimension_; - table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; - table_file.nlist_ = groups[table_file.table_id_].nlist_; - table_file.metric_type_ = groups[table_file.table_id_].metric_type_; - files.push_back(table_file); + auto name = ConnectorPtr->select(columns(&TableSchema::table_id_), + where(c(&TableSchema::owner_table_) == table_id + and c(&TableSchema::partition_tag_) == tag)); + if (name.size() > 0) { + partition_name = std::get<0>(name[0]); + } else { + return Status(DB_NOT_FOUND, "Table " + table_id + "'s partition " + tag + " not found"); } - - if (selected.size() > 0) { - ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files"; - } - return ret; - } catch (std::exception& e) { - return HandleException("Encounter exception when iterate raw files", e.what()); + } catch (std::exception &e) { + return HandleException("Encounter exception when get partition name", e.what()); } + + return Status::OK(); } Status -SqliteMetaImpl::FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, +SqliteMetaImpl::FilesToSearch(const std::string& table_id, + const std::vector& ids, + const DatesT& dates, DatePartionedTableFilesSchema& files) { files.clear(); server::MetricCollector metric; @@ -824,53 +1012,120 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFile } Status -SqliteMetaImpl::GetTableFiles(const std::string& table_id, const std::vector& ids, - TableFilesSchema& table_files) { +SqliteMetaImpl::FilesToIndex(TableFilesSchema &files) { + files.clear(); + try { - table_files.clear(); - auto files = ConnectorPtr->select( - columns(&TableFileSchema::id_, &TableFileSchema::file_id_, &TableFileSchema::file_type_, - &TableFileSchema::file_size_, &TableFileSchema::row_count_, &TableFileSchema::date_, - &TableFileSchema::engine_type_, &TableFileSchema::created_on_), - where(c(&TableFileSchema::table_id_) == table_id and in(&TableFileSchema::id_, ids) and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); + server::MetricCollector metric; - TableSchema table_schema; - table_schema.table_id_ = table_id; - auto status = DescribeTable(table_schema); - if (!status.ok()) { - return status; + auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::table_id_, + &TableFileSchema::file_id_, + &TableFileSchema::file_type_, + &TableFileSchema::file_size_, + &TableFileSchema::row_count_, + &TableFileSchema::date_, + &TableFileSchema::engine_type_, + &TableFileSchema::created_on_), + where(c(&TableFileSchema::file_type_) + == (int) TableFileSchema::TO_INDEX)); + + std::map groups; + TableFileSchema table_file; + + 
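+        // for each to-index file: resolve its storage path, then attach the owner table's
+        // schema (cached in 'groups' so DescribeTable is called only once per table)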
Status ret; + for (auto &file : selected) { + table_file.id_ = std::get<0>(file); + table_file.table_id_ = std::get<1>(file); + table_file.file_id_ = std::get<2>(file); + table_file.file_type_ = std::get<3>(file); + table_file.file_size_ = std::get<4>(file); + table_file.row_count_ = std::get<5>(file); + table_file.date_ = std::get<6>(file); + table_file.engine_type_ = std::get<7>(file); + table_file.created_on_ = std::get<8>(file); + + auto status = utils::GetTableFilePath(options_, table_file); + if (!status.ok()) { + ret = status; + } + auto groupItr = groups.find(table_file.table_id_); + if (groupItr == groups.end()) { + TableSchema table_schema; + table_schema.table_id_ = table_file.table_id_; + auto status = DescribeTable(table_schema); + if (!status.ok()) { + return status; + } + groups[table_file.table_id_] = table_schema; + } + table_file.dimension_ = groups[table_file.table_id_].dimension_; + table_file.index_file_size_ = groups[table_file.table_id_].index_file_size_; + table_file.nlist_ = groups[table_file.table_id_].nlist_; + table_file.metric_type_ = groups[table_file.table_id_].metric_type_; + files.push_back(table_file); } - Status result; - for (auto& file : files) { - TableFileSchema file_schema; - file_schema.table_id_ = table_id; - file_schema.id_ = std::get<0>(file); - file_schema.file_id_ = std::get<1>(file); - file_schema.file_type_ = std::get<2>(file); - file_schema.file_size_ = std::get<3>(file); - file_schema.row_count_ = std::get<4>(file); - file_schema.date_ = std::get<5>(file); - file_schema.engine_type_ = std::get<6>(file); - file_schema.created_on_ = std::get<7>(file); - file_schema.dimension_ = table_schema.dimension_; - file_schema.index_file_size_ = table_schema.index_file_size_; - file_schema.nlist_ = table_schema.nlist_; - file_schema.metric_type_ = table_schema.metric_type_; - - utils::GetTableFilePath(options_, file_schema); - - table_files.emplace_back(file_schema); + if (selected.size() > 0) { + ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files"; } - - ENGINE_LOG_DEBUG << "Get table files by id"; - return result; - } catch (std::exception& e) { - return HandleException("Encounter exception when lookup table files", e.what()); + return ret; + } catch (std::exception &e) { + return HandleException("Encounter exception when iterate raw files", e.what()); } } +Status +SqliteMetaImpl::FilesByType(const std::string &table_id, + const std::vector &file_types, + std::vector &file_ids) { + if (file_types.empty()) { + return Status(DB_ERROR, "file types array is empty"); + } + + try { + file_ids.clear(); + auto selected = ConnectorPtr->select(columns(&TableFileSchema::file_id_, + &TableFileSchema::file_type_), + where(in(&TableFileSchema::file_type_, file_types) + and c(&TableFileSchema::table_id_) == table_id)); + + if (selected.size() >= 1) { + int raw_count = 0, new_count = 0, new_merge_count = 0, new_index_count = 0; + int to_index_count = 0, index_count = 0, backup_count = 0; + for (auto &file : selected) { + file_ids.push_back(std::get<0>(file)); + switch (std::get<1>(file)) { + case (int) TableFileSchema::RAW:raw_count++; + break; + case (int) TableFileSchema::NEW:new_count++; + break; + case (int) TableFileSchema::NEW_MERGE:new_merge_count++; + break; + case (int) TableFileSchema::NEW_INDEX:new_index_count++; + break; + case (int) TableFileSchema::TO_INDEX:to_index_count++; + break; + case (int) TableFileSchema::INDEX:index_count++; + break; + case (int) TableFileSchema::BACKUP:backup_count++; + break; + default:break; + } + } + + 
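+            // emit a one-line summary of the per-type counts tallied by the switch above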
ENGINE_LOG_DEBUG << "Table " << table_id << " currently has raw files:" << raw_count + << " new files:" << new_count << " new_merge files:" << new_merge_count + << " new_index files:" << new_index_count << " to_index files:" << to_index_count + << " index files:" << index_count << " backup files:" << backup_count; + } + } catch (std::exception &e) { + return HandleException("Encounter exception when check non index files", e.what()); + } + return Status::OK(); +} + + // TODO(myh): Support swap to cloud storage Status SqliteMetaImpl::Archive() { @@ -889,10 +1144,13 @@ SqliteMetaImpl::Archive() { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE), - where(c(&TableFileSchema::created_on_) < (int64_t)(now - usecs) and - c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE)); - } catch (std::exception& e) { + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE), + where( + c(&TableFileSchema::created_on_) < (int64_t) (now - usecs) and + c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE)); + } catch (std::exception &e) { return HandleException("Encounter exception when update table files", e.what()); } @@ -932,152 +1190,40 @@ SqliteMetaImpl::Size(uint64_t& result) { } Status -SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { - if (to_discard_size <= 0) { - return Status::OK(); - } - - ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; - +SqliteMetaImpl::CleanUp() { try { server::MetricCollector metric; // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); + std::vector file_types = { + (int) TableFileSchema::NEW, + (int) TableFileSchema::NEW_INDEX, + (int) TableFileSchema::NEW_MERGE + }; + auto files = + ConnectorPtr->select(columns(&TableFileSchema::id_), where(in(&TableFileSchema::file_type_, file_types))); + auto commited = ConnectorPtr->transaction([&]() mutable { - auto selected = - ConnectorPtr->select(columns(&TableFileSchema::id_, &TableFileSchema::file_size_), - where(c(&TableFileSchema::file_type_) != (int)TableFileSchema::TO_DELETE), - order_by(&TableFileSchema::id_), limit(10)); - - std::vector ids; - TableFileSchema table_file; - - for (auto& file : selected) { - if (to_discard_size <= 0) - break; - table_file.id_ = std::get<0>(file); - table_file.file_size_ = std::get<1>(file); - ids.push_back(table_file.id_); - ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_ - << " table_file.size=" << table_file.file_size_; - to_discard_size -= table_file.file_size_; - } - - if (ids.size() == 0) { - return true; - } - - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_DELETE, - c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), - where(in(&TableFileSchema::id_, ids))); - - return true; - }); - - if (!commited) { - return HandleException("DiscardFiles error: sqlite transaction failed"); - } - } catch (std::exception& e) { - return HandleException("Encounter exception when discard table file", e.what()); - } - - return DiscardFiles(to_discard_size); -} - -Status -SqliteMetaImpl::UpdateTableFile(TableFileSchema& file_schema) { - file_schema.updated_time_ = utils::GetMicroSecTimeStamp(); - try { - server::MetricCollector metric; - - // multi-threads call sqlite 
update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - auto tables = ConnectorPtr->select(columns(&TableSchema::state_), - where(c(&TableSchema::table_id_) == file_schema.table_id_)); - - // if the table has been deleted, just mark the table file as TO_DELETE - // clean thread will delete the file later - if (tables.size() < 1 || std::get<0>(tables[0]) == (int)TableSchema::TO_DELETE) { - file_schema.file_type_ = TableFileSchema::TO_DELETE; - } - - ConnectorPtr->update(file_schema); - - ENGINE_LOG_DEBUG << "Update single table file, file id = " << file_schema.file_id_; - } catch (std::exception& e) { - std::string msg = - "Exception update table file: table_id = " + file_schema.table_id_ + " file_id = " + file_schema.file_id_; - return HandleException(msg, e.what()); - } - return Status::OK(); -} - -Status -SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) { - try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - ConnectorPtr->update_all(set(c(&TableFileSchema::file_type_) = (int)TableFileSchema::TO_INDEX), - where(c(&TableFileSchema::table_id_) == table_id and - c(&TableFileSchema::file_type_) == (int)TableFileSchema::RAW)); - - ENGINE_LOG_DEBUG << "Update files to to_index, table id = " << table_id; - } catch (std::exception& e) { - return HandleException("Encounter exception when update table files to to_index", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::UpdateTableFiles(TableFilesSchema& files) { - try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - std::map has_tables; - for (auto& file : files) { - if (has_tables.find(file.table_id_) != has_tables.end()) { - continue; - } - auto tables = ConnectorPtr->select(columns(&TableSchema::id_), - where(c(&TableSchema::table_id_) == file.table_id_ and - c(&TableSchema::state_) != (int)TableSchema::TO_DELETE)); - if (tables.size() >= 1) { - has_tables[file.table_id_] = true; - } else { - has_tables[file.table_id_] = false; - } - } - - auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto& file : files) { - if (!has_tables[file.table_id_]) { - file.file_type_ = TableFileSchema::TO_DELETE; - } - - file.updated_time_ = utils::GetMicroSecTimeStamp(); - ConnectorPtr->update(file); + for (auto &file : files) { + ENGINE_LOG_DEBUG << "Remove table file type as NEW"; + ConnectorPtr->remove(std::get<0>(file)); } return true; }); if (!commited) { - return HandleException("UpdateTableFiles error: sqlite transaction failed"); + return HandleException("CleanUp error: sqlite transaction failed"); } - ENGINE_LOG_DEBUG << "Update " << files.size() << " table files"; - } catch (std::exception& e) { - return HandleException("Encounter exception when update table files", e.what()); + if (files.size() > 0) { + ENGINE_LOG_DEBUG << "Clean " << files.size() << " files"; + } + } catch (std::exception &e) { + return HandleException("Encounter exception when clean table file", e.what()); } + return Status::OK(); } @@ -1093,10 +1239,16 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here std::lock_guard meta_lock(meta_mutex_); - auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, 
&TableFileSchema::table_id_, - &TableFileSchema::file_id_, &TableFileSchema::date_), - where(c(&TableFileSchema::file_type_) == (int)TableFileSchema::TO_DELETE and - c(&TableFileSchema::updated_time_) < now - seconds * US_PS)); + auto files = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::table_id_, + &TableFileSchema::file_id_, + &TableFileSchema::date_), + where( + c(&TableFileSchema::file_type_) == + (int) TableFileSchema::TO_DELETE + and + c(&TableFileSchema::updated_time_) + < now - seconds * US_PS)); auto commited = ConnectorPtr->transaction([&]() mutable { TableFileSchema table_file; @@ -1180,42 +1332,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) { } Status -SqliteMetaImpl::CleanUp() { - try { - server::MetricCollector metric; - - // multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here - std::lock_guard meta_lock(meta_mutex_); - - std::vector file_types = {(int)TableFileSchema::NEW, (int)TableFileSchema::NEW_INDEX, - (int)TableFileSchema::NEW_MERGE}; - auto files = - ConnectorPtr->select(columns(&TableFileSchema::id_), where(in(&TableFileSchema::file_type_, file_types))); - - auto commited = ConnectorPtr->transaction([&]() mutable { - for (auto& file : files) { - ENGINE_LOG_DEBUG << "Remove table file type as NEW"; - ConnectorPtr->remove(std::get<0>(file)); - } - return true; - }); - - if (!commited) { - return HandleException("CleanUp error: sqlite transaction failed"); - } - - if (files.size() > 0) { - ENGINE_LOG_DEBUG << "Clean " << files.size() << " files"; - } - } catch (std::exception& e) { - return HandleException("Encounter exception when clean table file", e.what()); - } - - return Status::OK(); -} - -Status -SqliteMetaImpl::Count(const std::string& table_id, uint64_t& result) { +SqliteMetaImpl::Count(const std::string &table_id, uint64_t &result) { try { server::MetricCollector metric; @@ -1257,6 +1374,66 @@ SqliteMetaImpl::DropAll() { return Status::OK(); } -} // namespace meta -} // namespace engine -} // namespace milvus +Status +SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) { + if (to_discard_size <= 0) { + return Status::OK(); + } + + ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size; + + try { + server::MetricCollector metric; + + //multi-threads call sqlite update may get exception('bad logic', etc), so we add a lock here + std::lock_guard meta_lock(meta_mutex_); + + auto commited = ConnectorPtr->transaction([&]() mutable { + auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_, + &TableFileSchema::file_size_), + where(c(&TableFileSchema::file_type_) + != (int) TableFileSchema::TO_DELETE), + order_by(&TableFileSchema::id_), + limit(10)); + + std::vector ids; + TableFileSchema table_file; + + for (auto &file : selected) { + if (to_discard_size <= 0) break; + table_file.id_ = std::get<0>(file); + table_file.file_size_ = std::get<1>(file); + ids.push_back(table_file.id_); + ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_ + << " table_file.size=" << table_file.file_size_; + to_discard_size -= table_file.file_size_; + } + + if (ids.size() == 0) { + return true; + } + + ConnectorPtr->update_all( + set( + c(&TableFileSchema::file_type_) = (int) TableFileSchema::TO_DELETE, + c(&TableFileSchema::updated_time_) = utils::GetMicroSecTimeStamp()), + where( + in(&TableFileSchema::id_, ids))); + + return true; + }); + + if (!commited) { + return HandleException("DiscardFiles error: sqlite transaction failed"); + } + } catch (std::exception &e) { + 
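+        // a throw inside the transaction lambda above should already have rolled the
+        // transaction back (assumption: sqlite_orm rolls back when the callback throws),
+        // so all that is left to do here is report the error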
return HandleException("Encounter exception when discard table file", e.what()); + } + + return DiscardFiles(to_discard_size); +} + +} // namespace meta +} // namespace engine +} // namespace milvus + diff --git a/core/src/db/meta/SqliteMetaImpl.h b/core/src/db/meta/SqliteMetaImpl.h index 0fc3f3c4ba..84d97ed49d 100644 --- a/core/src/db/meta/SqliteMetaImpl.h +++ b/core/src/db/meta/SqliteMetaImpl.h @@ -49,7 +49,7 @@ class SqliteMetaImpl : public Meta { AllTables(std::vector& table_schema_array) override; Status - DeleteTable(const std::string& table_id) override; + DropTable(const std::string& table_id) override; Status DeleteTableFiles(const std::string& table_id) override; @@ -58,21 +58,26 @@ class SqliteMetaImpl : public Meta { CreateTableFile(TableFileSchema& file_schema) override; Status - DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override; + DropDataByDate(const std::string& table_id, const DatesT& dates) override; Status GetTableFiles(const std::string& table_id, const std::vector& ids, TableFilesSchema& table_files) override; - Status - FilesByType(const std::string& table_id, const std::vector& file_types, - std::vector& file_ids) override; - Status UpdateTableIndex(const std::string& table_id, const TableIndex& index) override; Status UpdateTableFlag(const std::string& table_id, int64_t flag) override; + Status + UpdateTableFile(TableFileSchema& file_schema) override; + + Status + UpdateTableFilesToIndex(const std::string& table_id) override; + + Status + UpdateTableFiles(TableFilesSchema& files) override; + Status DescribeTableIndex(const std::string& table_id, TableIndex& index) override; @@ -80,13 +85,16 @@ class SqliteMetaImpl : public Meta { DropTableIndex(const std::string& table_id) override; Status - UpdateTableFilesToIndex(const std::string& table_id) override; + CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override; Status - UpdateTableFile(TableFileSchema& file_schema) override; + DropPartition(const std::string& partition_name) override; Status - UpdateTableFiles(TableFilesSchema& files) override; + ShowPartitions(const std::string& table_id, std::vector& partiton_schema_array) override; + + Status + GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override; Status FilesToSearch(const std::string& table_id, const std::vector& ids, const DatesT& dates, @@ -99,11 +107,15 @@ class SqliteMetaImpl : public Meta { FilesToIndex(TableFilesSchema&) override; Status - Archive() override; + FilesByType(const std::string& table_id, const std::vector& file_types, + std::vector& file_ids) override; Status Size(uint64_t& result) override; + Status + Archive() override; + Status CleanUp() override; diff --git a/core/src/grpc/gen-milvus/milvus.grpc.pb.cc b/core/src/grpc/gen-milvus/milvus.grpc.pb.cc index 82a1b99162..9cb5e70d3d 100644 --- a/core/src/grpc/gen-milvus/milvus.grpc.pb.cc +++ b/core/src/grpc/gen-milvus/milvus.grpc.pb.cc @@ -22,19 +22,22 @@ namespace grpc { static const char* MilvusService_method_names[] = { "/milvus.grpc.MilvusService/CreateTable", "/milvus.grpc.MilvusService/HasTable", - "/milvus.grpc.MilvusService/DropTable", - "/milvus.grpc.MilvusService/CreateIndex", - "/milvus.grpc.MilvusService/Insert", - "/milvus.grpc.MilvusService/Search", - "/milvus.grpc.MilvusService/SearchInFiles", "/milvus.grpc.MilvusService/DescribeTable", "/milvus.grpc.MilvusService/CountTable", "/milvus.grpc.MilvusService/ShowTables", - 
"/milvus.grpc.MilvusService/Cmd", - "/milvus.grpc.MilvusService/DeleteByRange", - "/milvus.grpc.MilvusService/PreloadTable", + "/milvus.grpc.MilvusService/DropTable", + "/milvus.grpc.MilvusService/CreateIndex", "/milvus.grpc.MilvusService/DescribeIndex", "/milvus.grpc.MilvusService/DropIndex", + "/milvus.grpc.MilvusService/CreatePartition", + "/milvus.grpc.MilvusService/ShowPartitions", + "/milvus.grpc.MilvusService/DropPartition", + "/milvus.grpc.MilvusService/Insert", + "/milvus.grpc.MilvusService/Search", + "/milvus.grpc.MilvusService/SearchInFiles", + "/milvus.grpc.MilvusService/Cmd", + "/milvus.grpc.MilvusService/DeleteByDate", + "/milvus.grpc.MilvusService/PreloadTable", }; std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) { @@ -46,19 +49,22 @@ std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_p MilvusService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel) : channel_(channel), rpcmethod_CreateTable_(MilvusService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) , rpcmethod_HasTable_(MilvusService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DropTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_CreateIndex_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_Insert_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_Search_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_SearchInFiles_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DescribeTable_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_CountTable_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_ShowTables_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_Cmd_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DeleteByRange_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_PreloadTable_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DescribeIndex_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) - , rpcmethod_DropIndex_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DescribeTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CountTable_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_ShowTables_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DropTable_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CreateIndex_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DescribeIndex_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DropIndex_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_CreatePartition_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , 
rpcmethod_ShowPartitions_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DropPartition_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Insert_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Search_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_SearchInFiles_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_Cmd_(MilvusService_method_names[15], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_DeleteByDate_(MilvusService_method_names[16], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) + , rpcmethod_PreloadTable_(MilvusService_method_names[17], ::grpc::internal::RpcMethod::NORMAL_RPC, channel) {} ::grpc::Status MilvusService::Stub::CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) { @@ -117,146 +123,6 @@ void MilvusService::Stub::experimental_async::HasTable(::grpc::ClientContext* co return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::BoolReply>::Create(channel_.get(), cq, rpcmethod_HasTable_, context, request, false); } -::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return 
::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false); -} - -::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false); -} - -::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response); -} - -void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); -} - -void 
MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false); -} - -::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, 
rpcmethod_Search_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false); -} - -::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); -} - -void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false); -} - ::grpc::Status MilvusService::Stub::DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) { return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DescribeTable_, context, request, response); } @@ -341,88 +207,60 @@ void MilvusService::Stub::experimental_async::ShowTables(::grpc::ClientContext* return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< 
::milvus::grpc::TableNameList>::Create(channel_.get(), cq, rpcmethod_ShowTables_, context, request, false); } -::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response); +::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); } -void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - 
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false); } -::grpc::Status MilvusService::Stub::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByRange_, context, request, response); +::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f)); +void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f)); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor); +void 
MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); } -void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor); +void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, true); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true); } -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, false); -} - -::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { - return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { - ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, 
request, response, reactor); -} - -void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { - ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true); -} - -::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false); +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false); } ::grpc::Status MilvusService::Stub::DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) { @@ -481,6 +319,258 @@ void MilvusService::Stub::experimental_async::DropIndex(::grpc::ClientContext* c return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropIndex_, context, request, false); } +::grpc::Status MilvusService::Stub::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreatePartition_, context, request, response); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* 
reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, false); +} + +::grpc::Status MilvusService::Stub::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ShowPartitions_, context, request, response); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, false); +} + +::grpc::Status 
MilvusService::Stub::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropPartition_, context, request, response); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, false); +} + +::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, 
::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false); +} + +::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* 
MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false); +} + +::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false); +} + +::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response); +} + +void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +} + 
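Note: every RPC in this generated stub follows the same trio visible for Cmd here: a blocking unary call built on BlockingUnaryCall, callback-based experimental_async overloads, and Async/PrepareAsync completion-queue readers. As a minimal sketch of the blocking path only, a client might be driven as follows; the NewStub() factory, the grpcpp header, and the localhost:19530 target are standard gRPC codegen assumptions, not anything shown in this patch.

// Sketch, not part of the patch: exercises the blocking Cmd() stub call.
// Assumes the standard generated NewStub() factory and an insecure channel
// to the default Milvus port (19530).
#include <grpcpp/grpcpp.h>
#include "milvus.grpc.pb.h"

int main() {
    auto channel = ::grpc::CreateChannel("localhost:19530",
                                         ::grpc::InsecureChannelCredentials());
    auto stub = ::milvus::grpc::MilvusService::NewStub(channel);

    ::grpc::ClientContext context;
    ::milvus::grpc::Command command;    // request message (dummy command)
    ::milvus::grpc::StringReply reply;  // response message

    // BlockingUnaryCall under the hood: returns once the server replies.
    ::grpc::Status status = stub->Cmd(&context, command, &reply);
    return status.ok() ? 0 : 1;
}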
+void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false); +} + +::grpc::Status MilvusService::Stub::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByDate_, context, request, response); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< 
::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, false); +} + +::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) { + return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function f) { + ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f)); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor); +} + +void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) { + ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true); +} + +::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false); +} + MilvusService::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( MilvusService_method_names[0], @@ -495,68 +585,83 @@ MilvusService::Service::Service() { AddMethod(new ::grpc::internal::RpcServiceMethod( MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< 
MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::DropTable), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[3], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::CreateIndex), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[4], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( - std::mem_fn(&MilvusService::Service::Insert), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[5], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( - std::mem_fn(&MilvusService::Service::Search), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[6], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( - std::mem_fn(&MilvusService::Service::SearchInFiles), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[7], - ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>( std::mem_fn(&MilvusService::Service::DescribeTable), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[8], + MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>( std::mem_fn(&MilvusService::Service::CountTable), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[9], + MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::TableNameList>( std::mem_fn(&MilvusService::Service::ShowTables), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[10], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>( - std::mem_fn(&MilvusService::Service::Cmd), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[11], - ::grpc::internal::RpcMethod::NORMAL_RPC, - new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::DeleteByRange), this))); - AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[12], + MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( - std::mem_fn(&MilvusService::Service::PreloadTable), this))); + std::mem_fn(&MilvusService::Service::DropTable), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[13], + MilvusService_method_names[6], + 
::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::CreateIndex), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>( std::mem_fn(&MilvusService::Service::DescribeIndex), this))); AddMethod(new ::grpc::internal::RpcServiceMethod( - MilvusService_method_names[14], + MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( std::mem_fn(&MilvusService::Service::DropIndex), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[9], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::CreatePartition), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[10], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>( + std::mem_fn(&MilvusService::Service::ShowPartitions), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[11], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::DropPartition), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[12], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( + std::mem_fn(&MilvusService::Service::Insert), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[13], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( + std::mem_fn(&MilvusService::Service::Search), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[14], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( + std::mem_fn(&MilvusService::Service::SearchInFiles), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[15], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>( + std::mem_fn(&MilvusService::Service::Cmd), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[16], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::DeleteByDate), this))); + AddMethod(new ::grpc::internal::RpcServiceMethod( + MilvusService_method_names[17], + ::grpc::internal::RpcMethod::NORMAL_RPC, + new 
::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>( + std::mem_fn(&MilvusService::Service::PreloadTable), this))); } MilvusService::Service::~Service() { @@ -576,41 +681,6 @@ MilvusService::Service::~Service() { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } -::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - ::grpc::Status MilvusService::Service::DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) { (void) context; (void) request; @@ -632,21 +702,14 @@ MilvusService::Service::~Service() { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } -::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) { +::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { (void) context; (void) request; (void) response; return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } -::grpc::Status MilvusService::Service::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) { - (void) context; - (void) request; - (void) response; - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); -} - -::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { +::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) { (void) context; (void) request; (void) response; @@ -667,6 +730,69 @@ MilvusService::Service::~Service() { return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } +::grpc::Status MilvusService::Service::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + 
(void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + +::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) { + (void) context; + (void) request; + (void) response; + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); +} + } // namespace milvus } // namespace grpc diff --git a/core/src/grpc/gen-milvus/milvus.grpc.pb.h b/core/src/grpc/gen-milvus/milvus.grpc.pb.h index 8ea2d13c80..439984f543 100644 --- a/core/src/grpc/gen-milvus/milvus.grpc.pb.h +++ b/core/src/grpc/gen-milvus/milvus.grpc.pb.h @@ -48,12 +48,11 @@ class MilvusService final { public: virtual ~StubInterface() {} // * - // @brief Create table method + // @brief This method is used to create a table. // - // This method is used to create table - // - // @param param, use to provide table information to be created. + // @param TableSchema, used to provide the table information to be created.
// + // @return Status virtual ::grpc::Status CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreateTableRaw(context, request, cq)); @@ -62,12 +61,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreateTableRaw(context, request, cq)); } // * - // @brief Test table existence method + // @brief This method is used to test table existence. // - // This method is used to test table existence. - // - // @param table_name, table name is going to be tested. + // @param TableName, the table name to be tested. // + // @return BoolReply virtual ::grpc::Status HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::BoolReply* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>> AsyncHasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>>(AsyncHasTableRaw(context, request, cq)); @@ -76,93 +74,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>>(PrepareAsyncHasTableRaw(context, request, cq)); } // * - // @brief Delete table method + // @brief This method is used to get table schema. // - // This method is used to delete table. + // @param TableName, target table name. // - // @param table_name, table name is going to be deleted. - // - virtual ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); - } - // * - // @brief Build index by table method - // - // This method is used to build index by table in sync mode. - // - // @param table_name, table is going to be built index.
- // - virtual ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); - } - // * - // @brief Add vector array to table - // - // This method is used to add vector array to table. - // - // @param table_name, table_name is inserted. - // @param record_array, vector array is inserted. - // - // @return vector id array - virtual ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); - } - // * - // @brief Query vector - // - // This method is used to query vector in table. - // - // @param table_name, table_name is queried. - // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); - } - // * - // @brief Internal use query interface - // - // This method is used to query vector in specified files. - // - // @param file_id_array, specified files id array, queried. 
- // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); - } - // * - // @brief Get table schema - // - // This method is used to get table schema. - // - // @param table_name, target table name. - // - // @return table schema + // @return TableSchema virtual ::grpc::Status DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>> AsyncDescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>>(AsyncDescribeTableRaw(context, request, cq)); @@ -171,13 +87,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>>(PrepareAsyncDescribeTableRaw(context, request, cq)); } // * - // @brief Get table schema + // @brief This method is used to get table row count. // - // This method is used to get table schema. + // @param TableName, target table name. // - // @param table_name, target table name. - // - // @return table schema + // @return TableRowCount virtual ::grpc::Status CountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableRowCount* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>> AsyncCountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>>(AsyncCountTableRaw(context, request, cq)); @@ -186,12 +100,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>>(PrepareAsyncCountTableRaw(context, request, cq)); } // * - // @brief List all tables in database + // @brief This method is used to list all tables. // - // This method is used to list all tables. + // @param Command, dummy parameter. // - // - // @return table names.
+ // @return TableNameList virtual ::grpc::Status ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::TableNameList* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>> AsyncShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>>(AsyncShowTablesRaw(context, request, cq)); @@ -200,50 +113,37 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>>(PrepareAsyncShowTablesRaw(context, request, cq)); } // * - // @brief Give the server status + // @brief This method is used to delete table. // - // This method is used to give the server status. + // @param TableName, table name is going to be deleted. // - // @return Server status. - virtual ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + // @return Status + virtual ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); } // * - // @brief delete table by range + // @brief This method is used to build index by table in sync mode. // - // This method is used to delete vector by range + // @param IndexParam, index parameters. // - // @return rpc status.
- virtual ::grpc::Status DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDeleteByRangeRaw(context, request, cq)); + // @return Status + virtual ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDeleteByRangeRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); } // * - // @brief preload table + // @brief This method is used to describe index // - // This method is used to preload table + // @param TableName, target table name. // - // @return Status. - virtual ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); - } - // * - // @brief describe index - // - // This method is used to describe index - // - // @return Status. 
+ // @return IndexParam virtual ::grpc::Status DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>> AsyncDescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>>(AsyncDescribeIndexRaw(context, request, cq)); @@ -252,11 +152,11 @@ class MilvusService final { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>>(PrepareAsyncDescribeIndexRaw(context, request, cq)); } // * - // @brief drop index + // @brief This method is used to drop index // - // This method is used to drop index + // @param TableName, target table name. // - // @return Status. + // @return Status virtual ::grpc::Status DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropIndexRaw(context, request, cq)); @@ -264,181 +164,306 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropIndexRaw(context, request, cq)); } + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + virtual ::grpc::Status CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncCreatePartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncCreatePartitionRaw(context, request, cq)); + } + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. 
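+ // Example (editor's sketch, not generated output; reuses the hypothetical stub from the CreateTable note above): + //   ::milvus::grpc::TableName tname;  // set the table name field per milvus.proto + //   ::milvus::grpc::PartitionList partitions; ::grpc::ClientContext ctx; + //   ::grpc::Status rpc = stub->ShowPartitions(&ctx, tname, &partitions);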
+ // + // @return PartitionList + virtual ::grpc::Status ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>> AsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>>(AsyncShowPartitionsRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>> PrepareAsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>>(PrepareAsyncShowPartitionsRaw(context, request, cq)); + } + // * + // @brief This method is used to drop partition + // + // @param PartitionParam, target partition. + // + // @return Status + virtual ::grpc::Status DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDropPartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDropPartitionRaw(context, request, cq)); + } + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters. + // + // @return VectorIds + virtual ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); + } + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. 
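+ // Example (editor's sketch, not generated output; field names follow milvus.proto of this revision and are otherwise assumptions): + //   ::milvus::grpc::SearchParam search;  // fill table_name, topk and query_record_array + //   ::milvus::grpc::TopKQueryResultList results; ::grpc::ClientContext ctx; + //   ::grpc::Status rpc = stub->Search(&ctx, search, &results);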
+ // + // @return TopKQueryResultList virtual ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); + } + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files parameters. + // + // @return TopKQueryResultList + virtual ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); + } + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + virtual ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + } + // * + // @brief This method is used to delete vector by date range + // + // @param DeleteByDateParam, delete parameters.
+ // + // @return Status virtual ::grpc::Status DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncDeleteByDateRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncDeleteByDateRaw(context, request, cq)); + } + // * + // @brief This method is used to preload table + // + // @param TableName, target table name. + // + // @return Status + virtual ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) = 0; + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); + } class experimental_async_interface { public: virtual ~experimental_async_interface() {} // * - // @brief Create table method + // @brief This method is used to create table // - // This method is used to create table - // - // @param param, use to provide table information to be created. + // @param TableSchema, used to provide table information to be created. // + // @return Status virtual void CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; virtual void CreateTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; virtual void CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void CreateTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Test table existence method + // @brief This method is used to test table existence. // - // This method is used to test table existence. - // - // @param table_name, table name is going to be tested. + // @param TableName, table name is going to be tested.
// + // @return BoolReply virtual void HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, std::function<void(::grpc::Status)>) = 0; virtual void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, std::function<void(::grpc::Status)>) = 0; virtual void HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Delete table method + // @brief This method is used to get table schema. // - // This method is used to delete table. + // @param TableName, target table name. // - // @param table_name, table name is going to be deleted. - // - virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Build index by table method - // - // This method is used to build index by table in sync mode. - // - // @param table_name, table is going to be built index. - // - virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Add vector array to table - // - // This method is used to add vector array to table. - // - // @param table_name, table_name is inserted. - // @param record_array, vector array is inserted. - // - // @return vector id array - virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)>) = 0; - virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)>) = 0; - virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Query vector - // - // This method is used to query vector in table. - // - // @param table_name, table_name is queried.
- // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; - virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; - virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Internal use query interface - // - // This method is used to query vector in specified files. - // - // @param file_id_array, specified files id array, queried. - // @param query_record_array, all vector are going to be queried. - // @param query_range_array, optional ranges for conditional search. If not specified, search whole table - // @param topk, how many similarity vectors will be searched. - // - // @return query result array. - virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; - virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; - virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief Get table schema - // - // This method is used to get table schema. - // - // @param table_name, target table name. - // - // @return table schema + // @return TableSchema virtual void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, std::function<void(::grpc::Status)>) = 0; virtual void DescribeTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableSchema* response, std::function<void(::grpc::Status)>) = 0; virtual void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void DescribeTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableSchema* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Get table schema + // @brief This method is used to get table row count. // - // This method is used to get table schema. + // @param TableName, target table name.
- // - // @return table schema + // @return TableRowCount virtual void CountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response, std::function<void(::grpc::Status)>) = 0; virtual void CountTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableRowCount* response, std::function<void(::grpc::Status)>) = 0; virtual void CountTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void CountTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableRowCount* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief List all tables in database + // @brief This method is used to list all tables. // - // This method is used to list all tables. + // @param Command, dummy parameter. // - // - // @return table names. + // @return TableNameList virtual void ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, std::function<void(::grpc::Status)>) = 0; virtual void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, std::function<void(::grpc::Status)>) = 0; virtual void ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief Give the server status + // @brief This method is used to delete table. // - // This method is used to give the server status. + // @param TableName, table name is going to be deleted. // - // @return Server status. - virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)>) = 0; - virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)>) = 0; - virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // @return Status + virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief delete table by range + // @brief This method is used to build index by table in sync mode. // - // This method is used to delete vector by range + // @param IndexParam, index parameters. // - // @return rpc status.
- virtual void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // @return Status + virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief preload table + // @brief This method is used to describe index // - // This method is used to preload table + // @param TableName, target table name. // - // @return Status. - virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; - virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; - // * - // @brief describe index - // - // This method is used to describe index - // - // @return Status. + // @return IndexParam virtual void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, std::function<void(::grpc::Status)>) = 0; virtual void DescribeIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::IndexParam* response, std::function<void(::grpc::Status)>) = 0; virtual void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void DescribeIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; // * - // @brief drop index + // @brief This method is used to drop index // - // This method is used to drop index + // @param TableName, target table name. // - // @return Status.
+ // @return Status virtual void DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; virtual void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; virtual void DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; virtual void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + virtual void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. + // + // @return PartitionList + virtual void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)>) = 0; + virtual void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)>) = 0; + virtual void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to drop partition + // + // @param PartitionParam, target partition. + // + // @return Status + virtual void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters.
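+ // Example (editor's sketch, not generated output): the callback overload declared below could be driven as + //   stub->experimental_async()->Insert(&ctx, &insert_param, &vector_ids, [](::grpc::Status s) { /* inspect s.ok() */ }); + // where insert_param and vector_ids are the request/response messages shown in the signature.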
+ // + // @return VectorIds + virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)>) = 0; + virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)>) = 0; + virtual void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. + // + // @return TopKQueryResultList + virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; + virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; + virtual void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files parameters. + // + // @return TopKQueryResultList + virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; + virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)>) = 0; + virtual void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)>) = 0; + virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)>) = 0; + virtual void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to delete vector by date range + // + // @param DeleteByDateParam, delete parameters.
+ // + // @return Status + virtual void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + // * + // @brief This method is used to preload table + // + // @param TableName, target table name. + // + // @return Status + virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)>) = 0; + virtual void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; + virtual void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0; }; virtual class experimental_async_interface* experimental_async() { return nullptr; } private: @@ -446,32 +471,38 @@ class MilvusService final { virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreateTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>* AsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::BoolReply>* PrepareAsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request,
::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>* AsyncDescribeTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableSchema>* PrepareAsyncDescribeTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>* AsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableRowCount>* PrepareAsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>* AsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TableNameList>* PrepareAsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; - virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual 
::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>* AsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::IndexParam>* PrepareAsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>* AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::PartitionList>* PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const 
::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; + virtual ::grpc::ClientAsyncResponseReaderInterface< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) = 0; }; class Stub final : public StubInterface { public: @@ -490,41 +521,6 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>> PrepareAsyncHasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>>(PrepareAsyncHasTableRaw(context, request, cq)); } - ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); - } - ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> 
AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); - } - ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); - } - ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); - } - ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); - } ::grpc::Status DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) override; std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>> AsyncDescribeTable(::grpc::ClientContext* context, const 
::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>>(AsyncDescribeTableRaw(context, request, cq)); @@ -546,26 +542,19 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>> PrepareAsyncShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>>(PrepareAsyncShowTablesRaw(context, request, cq)); } - ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + ::grpc::Status DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDropTableRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropTableRaw(context, request, cq)); } - ::grpc::Status DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDeleteByRangeRaw(context, request, cq)); + ::grpc::Status CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncCreateIndexRaw(context, request, cq)); } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDeleteByRangeRaw(context, 
request, cq)); - } - ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); - } - std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { - return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncCreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncCreateIndexRaw(context, request, cq)); } ::grpc::Status DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) override; std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::IndexParam>> AsyncDescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { @@ -581,6 +570,69 @@ class MilvusService final { std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropIndexRaw(context, request, cq)); } + ::grpc::Status CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncCreatePartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncCreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncCreatePartitionRaw(context, request, cq)); + } + ::grpc::Status ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>> AsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>>(AsyncShowPartitionsRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>> PrepareAsyncShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return 
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>>(PrepareAsyncShowPartitionsRaw(context, request, cq)); + } + ::grpc::Status DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDropPartitionRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDropPartitionRaw(context, request, cq)); + } + ::grpc::Status Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> AsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(AsyncInsertRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>> PrepareAsyncInsert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>>(PrepareAsyncInsertRaw(context, request, cq)); + } + ::grpc::Status Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearch(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchRaw(context, request, cq)); + } + ::grpc::Status SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> AsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>>(AsyncSearchInFilesRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>> PrepareAsyncSearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< 
::milvus::grpc::TopKQueryResultList>>(PrepareAsyncSearchInFilesRaw(context, request, cq)); + } + ::grpc::Status Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> AsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(AsyncCmdRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>> PrepareAsyncCmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>>(PrepareAsyncCmdRaw(context, request, cq)); + } + ::grpc::Status DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncDeleteByDateRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncDeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncDeleteByDateRaw(context, request, cq)); + } + ::grpc::Status PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) override; + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> AsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(AsyncPreloadTableRaw(context, request, cq)); + } + std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>> PrepareAsyncPreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) { + return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>>(PrepareAsyncPreloadTableRaw(context, request, cq)); + } class experimental_async final : public StubInterface::experimental_async_interface { public: @@ -592,26 +644,6 @@ class MilvusService final { void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, std::function) override; void HasTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void HasTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; - void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void 
DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function) override; - void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function) override; - void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function) override; - void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; - void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, std::function) override; void DescribeTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableSchema* response, std::function) override; void DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, 
::milvus::grpc::TableSchema* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; @@ -624,18 +656,14 @@ class MilvusService final { void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, std::function) override; void ShowTables(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void ShowTables(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function) override; - void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function) override; - void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function) override; - void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; - void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; - void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; - void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; + void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function) override; + void CreateIndex(::grpc::ClientContext* context, const 
::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, std::function) override; void DescribeIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::IndexParam* response, std::function) override; void DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; @@ -644,6 +672,42 @@ class MilvusService final { void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; void DropIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; void DropIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function) override; + void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function) override; + void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function) override; + void ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function) override; + void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, 
::grpc::experimental::ClientUnaryReactor* reactor) override; + void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function) override; + void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function) override; + void Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function) override; + void SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function) override; + void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function) override; + void Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function) override; + void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void PreloadTable(::grpc::ClientContext* 
context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function) override; + void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function) override; + void PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; + void PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) override; private: friend class Stub; explicit experimental_async(Stub* stub): stub_(stub) { } @@ -659,47 +723,56 @@ class MilvusService final { ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreateTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>* AsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::BoolReply>* PrepareAsyncHasTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>* AsyncDescribeTableRaw(::grpc::ClientContext* context, const 
::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableSchema>* PrepareAsyncDescribeTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableRowCount>* AsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableRowCount>* PrepareAsyncCountTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>* AsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TableNameList>* PrepareAsyncShowTablesRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; - ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::IndexParam>* AsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::IndexParam>* PrepareAsyncDescribeIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* 
AsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const 
::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; + ::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) override; const ::grpc::internal::RpcMethod rpcmethod_CreateTable_; const ::grpc::internal::RpcMethod rpcmethod_HasTable_; - const ::grpc::internal::RpcMethod rpcmethod_DropTable_; - const ::grpc::internal::RpcMethod rpcmethod_CreateIndex_; - const ::grpc::internal::RpcMethod rpcmethod_Insert_; - const ::grpc::internal::RpcMethod rpcmethod_Search_; - const ::grpc::internal::RpcMethod rpcmethod_SearchInFiles_; const ::grpc::internal::RpcMethod rpcmethod_DescribeTable_; const ::grpc::internal::RpcMethod rpcmethod_CountTable_; const ::grpc::internal::RpcMethod rpcmethod_ShowTables_; - const ::grpc::internal::RpcMethod rpcmethod_Cmd_; - const ::grpc::internal::RpcMethod rpcmethod_DeleteByRange_; - const ::grpc::internal::RpcMethod rpcmethod_PreloadTable_; + const ::grpc::internal::RpcMethod rpcmethod_DropTable_; + const ::grpc::internal::RpcMethod rpcmethod_CreateIndex_; const ::grpc::internal::RpcMethod rpcmethod_DescribeIndex_; const ::grpc::internal::RpcMethod rpcmethod_DropIndex_; + const ::grpc::internal::RpcMethod rpcmethod_CreatePartition_; + const ::grpc::internal::RpcMethod rpcmethod_ShowPartitions_; + const ::grpc::internal::RpcMethod rpcmethod_DropPartition_; + const ::grpc::internal::RpcMethod rpcmethod_Insert_; + const ::grpc::internal::RpcMethod rpcmethod_Search_; + const ::grpc::internal::RpcMethod rpcmethod_SearchInFiles_; + const ::grpc::internal::RpcMethod rpcmethod_Cmd_; + const ::grpc::internal::RpcMethod rpcmethod_DeleteByDate_; + const ::grpc::internal::RpcMethod rpcmethod_PreloadTable_; }; static std::unique_ptr NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions()); @@ -708,137 +781,136 @@ class MilvusService final { Service(); virtual ~Service(); // * - // @brief Create table method + // @brief This method is used to create table // - // This method is used to create table - // - // @param param, use to provide table information to be created. + // @param TableSchema, use to provide table information to be created. // + // @return Status virtual ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response); // * - // @brief Test table existence method + // @brief This method is used to test table existence. // - // This method is used to test table existence. - // - // @param table_name, table name is going to be tested. + // @param TableName, table name is going to be tested. // + // @return BoolReply virtual ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response); // * - // @brief Delete table method + // @brief This method is used to get table schema. // - // This method is used to delete table. + // @param TableName, target table name. // - // @param table_name, table name is going to be deleted. 
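For orientation, the synchronous half of the stub declared above can be driven as in the following sketch. Everything in it is illustrative only: the header name milvus.grpc.pb.h, the localhost:19530 target, and the default-constructed request are assumptions, since neither the build setup nor the milvus.proto message fields appear in this hunk.

#include <iostream>
#include <memory>
#include <grpcpp/grpcpp.h>
#include "milvus.grpc.pb.h"  // assumed name of the generated header

int main() {
  // Placeholder endpoint; any reachable MilvusService server address would do.
  auto channel = grpc::CreateChannel("localhost:19530", grpc::InsecureChannelCredentials());
  std::unique_ptr<milvus::grpc::MilvusService::Stub> stub =
      milvus::grpc::MilvusService::NewStub(channel);

  milvus::grpc::TableName request;  // fields left default; the layout comes from milvus.proto
  milvus::grpc::BoolReply reply;
  grpc::ClientContext context;

  // Blocking unary call through the synchronous stub method.
  grpc::Status status = stub->HasTable(&context, request, &reply);
  if (status.ok()) {
    std::cout << "HasTable RPC succeeded" << std::endl;
  } else {
    std::cout << "RPC failed: " << status.error_message() << std::endl;
  }
  return status.ok() ? 0 : 1;
}

The Async/PrepareAsync variants declared above follow the standard gRPC alternative to this: instead of blocking, they hand back a ClientAsyncResponseReader whose completion is collected from a CompletionQueue.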
- //
- virtual ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response);
- // *
- // @brief Build index by table method
- //
- // This method is used to build index by table in sync mode.
- //
- // @param table_name, table is going to be built index.
- //
- virtual ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response);
- // *
- // @brief Add vector array to table
- //
- // This method is used to add vector array to table.
- //
- // @param table_name, table_name is inserted.
- // @param record_array, vector array is inserted.
- //
- // @return vector id array
- virtual ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response);
- // *
- // @brief Query vector
- //
- // This method is used to query vector in table.
- //
- // @param table_name, table_name is queried.
- // @param query_record_array, all vector are going to be queried.
- // @param query_range_array, optional ranges for conditional search. If not specified, search whole table
- // @param topk, how many similarity vectors will be searched.
- //
- // @return query result array.
- virtual ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response);
- // *
- // @brief Internal use query interface
- //
- // This method is used to query vector in specified files.
- //
- // @param file_id_array, specified files id array, queried.
- // @param query_record_array, all vector are going to be queried.
- // @param query_range_array, optional ranges for conditional search. If not specified, search whole table
- // @param topk, how many similarity vectors will be searched.
- //
- // @return query result array.
- virtual ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response);
- // *
- // @brief Get table schema
- //
- // This method is used to get table schema.
- //
- // @param table_name, target table name.
- //
- // @return table schema
+ // @return TableSchema
 virtual ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response);
 // *
- // @brief Get table schema
+ // @brief This method is used to get table row count.
 //
- // This method is used to get table schema.
+ // @param TableName, target table name.
 //
- // @param table_name, target table name.
- //
- // @return table schema
+ // @return TableRowCount
 virtual ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response);
 // *
- // @brief List all tables in database
+ // @brief This method is used to list all tables.
 //
- // This method is used to list all tables.
+ // @param Command, dummy parameter.
 //
- //
- // @return table names.
+ // @return TableNameList
 virtual ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response);
 // *
- // @brief Give the server status
+ // @brief This method is used to delete table.
 //
- // This method is used to give the server status.
+ // @param TableName, table name is going to be deleted.
 //
- // @return Server status.
- virtual ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response);
+ // @return Status
+ virtual ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response);
 // *
- // @brief delete table by range
+ // @brief This method is used to build index by table in sync mode.
 //
- // This method is used to delete vector by range
+ // @param IndexParam, index parameters.
 //
- // @return rpc status.
- virtual ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response);
+ // @return Status
+ virtual ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response);
 // *
- // @brief preload table
+ // @brief This method is used to describe index
 //
- // This method is used to preload table
+ // @param TableName, target table name.
 //
- // @return Status.
- virtual ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response);
- // *
- // @brief describe index
- //
- // This method is used to describe index
- //
- // @return Status.
+ // @return IndexParam
 virtual ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response);
 // *
- // @brief drop index
+ // @brief This method is used to drop index
 //
- // This method is used to drop index
+ // @param TableName, target table name.
 //
- // @return Status.
+ // @return Status
 virtual ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response);
+ // *
+ // @brief This method is used to create partition
+ //
+ // @param PartitionParam, partition parameters.
+ //
+ // @return Status
+ virtual ::grpc::Status CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response);
+ // *
+ // @brief This method is used to show partition information
+ //
+ // @param TableName, target table name.
+ //
+ // @return PartitionList
+ virtual ::grpc::Status ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response);
+ // *
+ // @brief This method is used to drop partition
+ //
+ // @param PartitionParam, target partition.
+ //
+ // @return Status
+ virtual ::grpc::Status DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response);
+ // *
+ // @brief This method is used to add vector array to table.
+ //
+ // @param InsertParam, insert parameters.
+ //
+ // @return VectorIds
+ virtual ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response);
+ // *
+ // @brief This method is used to query vector in table.
+ //
+ // @param SearchParam, search parameters.
+ //
+ // @return TopKQueryResultList
+ virtual ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response);
+ // *
+ // @brief This method is used to query vector in specified files.
+ //
+ // @param SearchInFilesParam, search in files parameters.
+ //
+ // @return TopKQueryResultList
+ virtual ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response);
+ // *
+ // @brief This method is used to give the server status.
+ //
+ // @param Command, command string
+ //
+ // @return StringReply
+ virtual ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response);
+ // *
+ // @brief This method is used to delete vector by date range
+ //
+ // @param DeleteByDateParam, delete parameters.
+ //
+ // @return Status
+ virtual ::grpc::Status DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response);
+ // *
+ // @brief This method is used to preload table
+ //
+ // @param TableName, target table name.
+ //
+ // @return Status
+ virtual ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response);
 };
 template <class BaseClass>
 class WithAsyncMethod_CreateTable : public BaseClass {
 private:
- void BaseClassMustBeDerivedFromService(const Service *service) {}
+ void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
 public:
 WithAsyncMethod_CreateTable() {
 ::grpc::Service::MarkMethodAsync(0);
@@ -847,7 +919,7 @@ class MilvusService final {
 BaseClassMustBeDerivedFromService(this);
 }
 // disable synchronous version of this method
- ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override {
+ ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override {
 abort();
 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
 }
@@ -858,7 +930,7 @@ class MilvusService final {
 template <class BaseClass>
 class WithAsyncMethod_HasTable : public BaseClass {
 private:
- void BaseClassMustBeDerivedFromService(const Service *service) {}
+ void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
 public:
 WithAsyncMethod_HasTable() {
 ::grpc::Service::MarkMethodAsync(1);
@@ -867,7 +939,7 @@ class MilvusService final {
 BaseClassMustBeDerivedFromService(this);
 }
 // disable synchronous version of this method
- ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override {
+ ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override {
 abort();
 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
 }
@@ -876,270 +948,330 @@ class MilvusService final {
 }
 };
 template <class BaseClass>
- class WithAsyncMethod_DropTable : public BaseClass {
- private:
- void BaseClassMustBeDerivedFromService(const Service *service) {}
- public:
- WithAsyncMethod_DropTable() {
- ::grpc::Service::MarkMethodAsync(2);
- }
- ~WithAsyncMethod_DropTable() override {
- BaseClassMustBeDerivedFromService(this);
- }
- // disable synchronous version of this method
- ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override {
- abort();
- return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
- }
- void RequestDropTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response,
::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_CreateIndex() { - ::grpc::Service::MarkMethodAsync(3); - } - ~WithAsyncMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestCreateIndex(::grpc::ServerContext* context, ::milvus::grpc::IndexParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_Insert() { - ::grpc::Service::MarkMethodAsync(4); - } - ~WithAsyncMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestInsert(::grpc::ServerContext* context, ::milvus::grpc::InsertParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::VectorIds>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_Search() { - ::grpc::Service::MarkMethodAsync(5); - } - ~WithAsyncMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearch(::grpc::ServerContext* context, ::milvus::grpc::SearchParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_SearchInFiles() { - ::grpc::Service::MarkMethodAsync(6); - } - ~WithAsyncMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status 
SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearchInFiles(::grpc::ServerContext* context, ::milvus::grpc::SearchInFilesParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template class WithAsyncMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_DescribeTable() { - ::grpc::Service::MarkMethodAsync(7); + ::grpc::Service::MarkMethodAsync(2); } ~WithAsyncMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableSchema>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_CountTable() { - ::grpc::Service::MarkMethodAsync(8); + ::grpc::Service::MarkMethodAsync(3); } ~WithAsyncMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestCountTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableRowCount>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: 
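  // A minimal sketch (not part of the generated header) of how these
  // WithAsyncMethod_* mixins are driven: the synchronous override aborts by
  // design, and servers instead call the generated Request<Method>() entry
  // points against a completion queue. The listening address and tag values
  // below are illustrative assumptions:
  //
  //   MilvusService::AsyncService service;
  //   ::grpc::ServerBuilder builder;
  //   builder.AddListeningPort("0.0.0.0:19530", ::grpc::InsecureServerCredentials());
  //   builder.RegisterService(&service);
  //   auto cq = builder.AddCompletionQueue();
  //   auto server = builder.BuildAndStart();
  //
  //   ::grpc::ServerContext ctx;
  //   ::milvus::grpc::Command request;
  //   ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableNameList> writer(&ctx);
  //   // Ask for the next ShowTables call; the tag is echoed back on the queue.
  //   service.RequestShowTables(&ctx, &request, &writer, cq.get(), cq.get(), (void*)1);
  //   void* tag; bool ok;
  //   cq->Next(&tag, &ok);                  // blocks until a call (or shutdown) arrives
  //   ::milvus::grpc::TableNameList reply;  // fill from application state here
  //   writer.Finish(reply, ::grpc::Status::OK, (void*)2);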
WithAsyncMethod_ShowTables() { - ::grpc::Service::MarkMethodAsync(9); + ::grpc::Service::MarkMethodAsync(4); } ~WithAsyncMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestShowTables(::grpc::ServerContext* context, ::milvus::grpc::Command* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TableNameList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithAsyncMethod_Cmd : public BaseClass { + class WithAsyncMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithAsyncMethod_Cmd() { - ::grpc::Service::MarkMethodAsync(10); + WithAsyncMethod_DropTable() { + ::grpc::Service::MarkMethodAsync(5); } - ~WithAsyncMethod_Cmd() override { + ~WithAsyncMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestCmd(::grpc::ServerContext* context, ::milvus::grpc::Command* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::StringReply>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + void RequestDropTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithAsyncMethod_DeleteByRange : public BaseClass { + class WithAsyncMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithAsyncMethod_DeleteByRange() { - ::grpc::Service::MarkMethodAsync(11); + WithAsyncMethod_CreateIndex() { + ::grpc::Service::MarkMethodAsync(6); } - ~WithAsyncMethod_DeleteByRange() override { + ~WithAsyncMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) 
override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestDeleteByRange(::grpc::ServerContext* context, ::milvus::grpc::DeleteByRangeParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithAsyncMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithAsyncMethod_PreloadTable() { - ::grpc::Service::MarkMethodAsync(12); - } - ~WithAsyncMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestPreloadTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + void RequestCreateIndex(::grpc::ServerContext* context, ::milvus::grpc::IndexParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithAsyncMethod_DescribeIndex() { - ::grpc::Service::MarkMethodAsync(13); + ::grpc::Service::MarkMethodAsync(7); } ~WithAsyncMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeIndex(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::IndexParam>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithAsyncMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: 
WithAsyncMethod_DropIndex() { - ::grpc::Service::MarkMethodAsync(14); + ::grpc::Service::MarkMethodAsync(8); } ~WithAsyncMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDropIndex(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_CreatePartition() { + ::grpc::Service::MarkMethodAsync(9); + } + ~WithAsyncMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCreatePartition(::grpc::ServerContext* context, ::milvus::grpc::PartitionParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_ShowPartitions() { + ::grpc::Service::MarkMethodAsync(10); + } + ~WithAsyncMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestShowPartitions(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::PartitionList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_DropPartition() { + ::grpc::Service::MarkMethodAsync(11); + } + ~WithAsyncMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, 
::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDropPartition(::grpc::ServerContext* context, ::milvus::grpc::PartitionParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Insert() { + ::grpc::Service::MarkMethodAsync(12); + } + ~WithAsyncMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestInsert(::grpc::ServerContext* context, ::milvus::grpc::InsertParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::VectorIds>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Search() { + ::grpc::Service::MarkMethodAsync(13); + } + ~WithAsyncMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearch(::grpc::ServerContext* context, ::milvus::grpc::SearchParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_SearchInFiles() { + ::grpc::Service::MarkMethodAsync(14); + } + ~WithAsyncMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearchInFiles(::grpc::ServerContext* context, ::milvus::grpc::SearchInFilesParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::TopKQueryResultList>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { ::grpc::Service::RequestAsyncUnary(14, context, request, response, new_call_cq, notification_cq, tag); } }; - typedef 
WithAsyncMethod_CreateTable > > > > > > > > > > > > > > AsyncService; + template + class WithAsyncMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_Cmd() { + ::grpc::Service::MarkMethodAsync(15); + } + ~WithAsyncMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCmd(::grpc::ServerContext* context, ::milvus::grpc::Command* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::StringReply>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(15, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_DeleteByDate() { + ::grpc::Service::MarkMethodAsync(16); + } + ~WithAsyncMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDeleteByDate(::grpc::ServerContext* context, ::milvus::grpc::DeleteByDateParam* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(16, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithAsyncMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithAsyncMethod_PreloadTable() { + ::grpc::Service::MarkMethodAsync(17); + } + ~WithAsyncMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPreloadTable(::grpc::ServerContext* context, ::milvus::grpc::TableName* request, ::grpc::ServerAsyncResponseWriter< ::milvus::grpc::Status>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(17, context, request, response, new_call_cq, notification_cq, tag); + } + }; + typedef WithAsyncMethod_CreateTable > > > > > > > > > > > > > > > > > AsyncService; template class ExperimentalWithCallbackMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_CreateTable() { ::grpc::Service::experimental().MarkMethodCallback(0, @@ -1161,16 +1293,16 @@ class MilvusService final { 
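  // The AsyncService typedef above stacks one mixin per RPC, so after this
  // patch it nests 18 wrappers (method indices 0-17) instead of 15. Expanded,
  // the chain reads roughly as:
  //
  //   typedef WithAsyncMethod_CreateTable<      // MarkMethodAsync(0)
  //           WithAsyncMethod_HasTable<         // MarkMethodAsync(1)
  //           /* ... one layer per RPC, down to ... */
  //           WithAsyncMethod_PreloadTable<     // MarkMethodAsync(17)
  //           Service > /* ... */ > > AsyncService;
  //
  // The indices are positional: inserting the partition RPCs at 9-11 (and
  // replacing DeleteByRange with DeleteByDate) pushes every later method to a
  // new index, which is why Insert, Search, Cmd, and the rest are renumbered
  // throughout the hunks in this file.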
BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_HasTable() { ::grpc::Service::experimental().MarkMethodCallback(1, @@ -1192,174 +1324,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_DropTable() { - ::grpc::Service::experimental().MarkMethodCallback(2, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::TableName* request, - ::milvus::grpc::Status* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->DropTable(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_DropTable( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(2)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - 
virtual void DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_CreateIndex() { - ::grpc::Service::experimental().MarkMethodCallback(3, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::IndexParam* request, - ::milvus::grpc::Status* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->CreateIndex(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_CreateIndex( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(3)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_Insert() { - ::grpc::Service::experimental().MarkMethodCallback(4, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::InsertParam* request, - ::milvus::grpc::VectorIds* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->Insert(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_Insert( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>*>( - ::grpc::Service::experimental().GetHandler(4)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ServerCallbackRpcController* controller) { 
controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_Search() { - ::grpc::Service::experimental().MarkMethodCallback(5, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::SearchParam* request, - ::milvus::grpc::TopKQueryResultList* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->Search(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_Search( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>*>( - ::grpc::Service::experimental().GetHandler(5)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_SearchInFiles() { - ::grpc::Service::experimental().MarkMethodCallback(6, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::SearchInFilesParam* request, - ::milvus::grpc::TopKQueryResultList* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->SearchInFiles(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_SearchInFiles( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>*>( - ::grpc::Service::experimental().GetHandler(6)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ServerCallbackRpcController* controller) { 
controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_DescribeTable() { - ::grpc::Service::experimental().MarkMethodCallback(7, + ::grpc::Service::experimental().MarkMethodCallback(2, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1371,26 +1348,26 @@ class MilvusService final { void SetMessageAllocatorFor_DescribeTable( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>*>( - ::grpc::Service::experimental().GetHandler(7)) + ::grpc::Service::experimental().GetHandler(2)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_CountTable() { - ::grpc::Service::experimental().MarkMethodCallback(8, + ::grpc::Service::experimental().MarkMethodCallback(3, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1402,26 +1379,26 @@ class MilvusService final { void SetMessageAllocatorFor_CountTable( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>*>( - ::grpc::Service::experimental().GetHandler(8)) + ::grpc::Service::experimental().GetHandler(3)) 
->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_ShowTables() { - ::grpc::Service::experimental().MarkMethodCallback(9, + ::grpc::Service::experimental().MarkMethodCallback(4, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>( [this](::grpc::ServerContext* context, const ::milvus::grpc::Command* request, @@ -1433,119 +1410,88 @@ class MilvusService final { void SetMessageAllocatorFor_ShowTables( ::grpc::experimental::MessageAllocator< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>*>( - ::grpc::Service::experimental().GetHandler(9)) + ::grpc::Service::experimental().GetHandler(4)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template - class ExperimentalWithCallbackMethod_Cmd : public BaseClass { + class ExperimentalWithCallbackMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void 
BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - ExperimentalWithCallbackMethod_Cmd() { - ::grpc::Service::experimental().MarkMethodCallback(10, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::Command* request, - ::milvus::grpc::StringReply* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->Cmd(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_Cmd( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::Command, ::milvus::grpc::StringReply>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>*>( - ::grpc::Service::experimental().GetHandler(10)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_Cmd() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_DeleteByRange : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_DeleteByRange() { - ::grpc::Service::experimental().MarkMethodCallback(11, - new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>( - [this](::grpc::ServerContext* context, - const ::milvus::grpc::DeleteByRangeParam* request, - ::milvus::grpc::Status* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->DeleteByRange(context, request, response, controller); - })); - } - void SetMessageAllocatorFor_DeleteByRange( - ::grpc::experimental::MessageAllocator< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>* allocator) { - static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(11)) - ->SetMessageAllocator(allocator); - } - ~ExperimentalWithCallbackMethod_DeleteByRange() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithCallbackMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithCallbackMethod_PreloadTable() { - ::grpc::Service::experimental().MarkMethodCallback(12, + 
ExperimentalWithCallbackMethod_DropTable() { + ::grpc::Service::experimental().MarkMethodCallback(5, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { - return this->PreloadTable(context, request, response, controller); + return this->DropTable(context, request, response, controller); })); } - void SetMessageAllocatorFor_PreloadTable( + void SetMessageAllocatorFor_DropTable( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(12)) + ::grpc::Service::experimental().GetHandler(5)) ->SetMessageAllocator(allocator); } - ~ExperimentalWithCallbackMethod_PreloadTable() override { + ~ExperimentalWithCallbackMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_CreateIndex : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_CreateIndex() { + ::grpc::Service::experimental().MarkMethodCallback(6, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::IndexParam* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->CreateIndex(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_CreateIndex( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(6)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_CreateIndex() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void 
CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_DescribeIndex() { - ::grpc::Service::experimental().MarkMethodCallback(13, + ::grpc::Service::experimental().MarkMethodCallback(7, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1557,26 +1503,26 @@ class MilvusService final { void SetMessageAllocatorFor_DescribeIndex( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>*>( - ::grpc::Service::experimental().GetHandler(13)) + ::grpc::Service::experimental().GetHandler(7)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithCallbackMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithCallbackMethod_DropIndex() { - ::grpc::Service::experimental().MarkMethodCallback(14, + ::grpc::Service::experimental().MarkMethodCallback(8, new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>( [this](::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, @@ -1588,24 +1534,303 @@ class MilvusService final { void SetMessageAllocatorFor_DropIndex( ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) { static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>( - ::grpc::Service::experimental().GetHandler(14)) + ::grpc::Service::experimental().GetHandler(8)) ->SetMessageAllocator(allocator); } ~ExperimentalWithCallbackMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } 
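  // A sketch (assumed usage, not generated code) of the experimental callback
  // API these ExperimentalWithCallbackMethod_* mixins expose: a server
  // overrides the controller-taking virtual instead of the disabled
  // synchronous method. The error-code setter is an assumption about the
  // milvus.grpc.Status message fields:
  //
  //   class CallbackImpl final : public MilvusService::ExperimentalCallbackService {
  //     void DropIndex(::grpc::ServerContext* context,
  //                    const ::milvus::grpc::TableName* request,
  //                    ::milvus::grpc::Status* response,
  //                    ::grpc::experimental::ServerCallbackRpcController* controller) override {
  //       response->set_error_code(::milvus::grpc::SUCCESS);  // assumed enum value
  //       controller->Finish(::grpc::Status::OK);
  //     }
  //   };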
// disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; - typedef ExperimentalWithCallbackMethod_CreateTable > > > > > > > > > > > > > > ExperimentalCallbackService; + template + class ExperimentalWithCallbackMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_CreatePartition() { + ::grpc::Service::experimental().MarkMethodCallback(9, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->CreatePartition(context, request, response, controller); + })); + } + void SetMessageAllocatorFor_CreatePartition( + ::grpc::experimental::MessageAllocator< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>* allocator) { + static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>*>( + ::grpc::Service::experimental().GetHandler(9)) + ->SetMessageAllocator(allocator); + } + ~ExperimentalWithCallbackMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithCallbackMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithCallbackMethod_ShowPartitions() { + ::grpc::Service::experimental().MarkMethodCallback(10, + new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>( + [this](::grpc::ServerContext* context, + const ::milvus::grpc::TableName* request, + ::milvus::grpc::PartitionList* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + return this->ShowPartitions(context, request, response, controller); + })); + } + void 
SetMessageAllocatorFor_ShowPartitions(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>*>(
+          ::grpc::Service::experimental().GetHandler(10))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_ShowPartitions() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_DropPartition : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_DropPartition() {
+      ::grpc::Service::experimental().MarkMethodCallback(11,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::PartitionParam* request,
+                 ::milvus::grpc::Status* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->DropPartition(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_DropPartition(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>*>(
+          ::grpc::Service::experimental().GetHandler(11))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_DropPartition() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_Insert : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_Insert() {
+      ::grpc::Service::experimental().MarkMethodCallback(12,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::InsertParam* request,
+                 ::milvus::grpc::VectorIds* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->Insert(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_Insert(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>*>(
+          ::grpc::Service::experimental().GetHandler(12))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_Insert() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_Search : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_Search() {
+      ::grpc::Service::experimental().MarkMethodCallback(13,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::SearchParam* request,
+                 ::milvus::grpc::TopKQueryResultList* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->Search(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_Search(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>*>(
+          ::grpc::Service::experimental().GetHandler(13))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_Search() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_SearchInFiles : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_SearchInFiles() {
+      ::grpc::Service::experimental().MarkMethodCallback(14,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::SearchInFilesParam* request,
+                 ::milvus::grpc::TopKQueryResultList* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->SearchInFiles(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_SearchInFiles(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>*>(
+          ::grpc::Service::experimental().GetHandler(14))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_SearchInFiles() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_Cmd : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_Cmd() {
+      ::grpc::Service::experimental().MarkMethodCallback(15,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::Command* request,
+                 ::milvus::grpc::StringReply* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->Cmd(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_Cmd(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::Command, ::milvus::grpc::StringReply>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>*>(
+          ::grpc::Service::experimental().GetHandler(15))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_Cmd() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_DeleteByDate : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_DeleteByDate() {
+      ::grpc::Service::experimental().MarkMethodCallback(16,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::DeleteByDateParam* request,
+                 ::milvus::grpc::Status* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->DeleteByDate(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_DeleteByDate(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>*>(
+          ::grpc::Service::experimental().GetHandler(16))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_DeleteByDate() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
+  template <class BaseClass>
+  class ExperimentalWithCallbackMethod_PreloadTable : public BaseClass {
+   private:
+    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
+   public:
+    ExperimentalWithCallbackMethod_PreloadTable() {
+      ::grpc::Service::experimental().MarkMethodCallback(17,
+        new ::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(
+          [this](::grpc::ServerContext* context,
+                 const ::milvus::grpc::TableName* request,
+                 ::milvus::grpc::Status* response,
+                 ::grpc::experimental::ServerCallbackRpcController* controller) {
+                   return this->PreloadTable(context, request, response, controller);
+                 }));
+    }
+    void SetMessageAllocatorFor_PreloadTable(
+        ::grpc::experimental::MessageAllocator< ::milvus::grpc::TableName, ::milvus::grpc::Status>* allocator) {
+      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>*>(
+          ::grpc::Service::experimental().GetHandler(17))
+              ->SetMessageAllocator(allocator);
+    }
+    ~ExperimentalWithCallbackMethod_PreloadTable() override {
+      BaseClassMustBeDerivedFromService(this);
+    }
+    // disable synchronous version of this method
+    ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override {
+      abort();
+      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
+    }
+    virtual void PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); }
+  };
 typedef ExperimentalWithCallbackMethod_CreateTable<ExperimentalWithCallbackMethod_HasTable<ExperimentalWithCallbackMethod_DescribeTable<ExperimentalWithCallbackMethod_CountTable<ExperimentalWithCallbackMethod_ShowTables<ExperimentalWithCallbackMethod_DropTable<ExperimentalWithCallbackMethod_CreateIndex<ExperimentalWithCallbackMethod_DescribeIndex<ExperimentalWithCallbackMethod_DropIndex<ExperimentalWithCallbackMethod_CreatePartition<ExperimentalWithCallbackMethod_ShowPartitions<ExperimentalWithCallbackMethod_DropPartition<ExperimentalWithCallbackMethod_Insert<ExperimentalWithCallbackMethod_Search<ExperimentalWithCallbackMethod_SearchInFiles<ExperimentalWithCallbackMethod_Cmd<ExperimentalWithCallbackMethod_DeleteByDate<ExperimentalWithCallbackMethod_PreloadTable<Service > > > > > > > > > > > > > > > > > > ExperimentalCallbackService;
 template <class BaseClass>
 class WithGenericMethod_CreateTable : public BaseClass {
  private:
-  void BaseClassMustBeDerivedFromService(const Service *service) {}
+  void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
  public:
  WithGenericMethod_CreateTable() {
    ::grpc::Service::MarkMethodGeneric(0);
@@ -1614,7 +1839,7 @@ class MilvusService final {
     BaseClassMustBeDerivedFromService(this);
   }
   // disable synchronous version of this method
-  ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override {
+  ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override {
    abort();
    return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
  }
@@
-1622,7 +1847,7 @@ class MilvusService final { template class WithGenericMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_HasTable() { ::grpc::Service::MarkMethodGeneric(1); @@ -1631,92 +1856,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_DropTable() { - ::grpc::Service::MarkMethodGeneric(2); - } - ~WithGenericMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_CreateIndex() { - ::grpc::Service::MarkMethodGeneric(3); - } - ~WithGenericMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_Insert() { - ::grpc::Service::MarkMethodGeneric(4); - } - ~WithGenericMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_Search() { - ::grpc::Service::MarkMethodGeneric(5); - } - ~WithGenericMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_SearchInFiles() { - ::grpc::Service::MarkMethodGeneric(6); - } - ~WithGenericMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status 
SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1724,16 +1864,16 @@ class MilvusService final { template class WithGenericMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_DescribeTable() { - ::grpc::Service::MarkMethodGeneric(7); + ::grpc::Service::MarkMethodGeneric(2); } ~WithGenericMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1741,16 +1881,16 @@ class MilvusService final { template class WithGenericMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_CountTable() { - ::grpc::Service::MarkMethodGeneric(8); + ::grpc::Service::MarkMethodGeneric(3); } ~WithGenericMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1758,67 +1898,50 @@ class MilvusService final { template class WithGenericMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_ShowTables() { - ::grpc::Service::MarkMethodGeneric(9); + ::grpc::Service::MarkMethodGeneric(4); } ~WithGenericMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } }; template - class WithGenericMethod_Cmd : public BaseClass { + class WithGenericMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithGenericMethod_Cmd() { - ::grpc::Service::MarkMethodGeneric(10); + WithGenericMethod_DropTable() { + 
::grpc::Service::MarkMethodGeneric(5); } - ~WithGenericMethod_Cmd() override { + ~WithGenericMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } }; template - class WithGenericMethod_DeleteByRange : public BaseClass { + class WithGenericMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithGenericMethod_DeleteByRange() { - ::grpc::Service::MarkMethodGeneric(11); + WithGenericMethod_CreateIndex() { + ::grpc::Service::MarkMethodGeneric(6); } - ~WithGenericMethod_DeleteByRange() override { + ~WithGenericMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - }; - template - class WithGenericMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithGenericMethod_PreloadTable() { - ::grpc::Service::MarkMethodGeneric(12); - } - ~WithGenericMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1826,16 +1949,16 @@ class MilvusService final { template class WithGenericMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_DescribeIndex() { - ::grpc::Service::MarkMethodGeneric(13); + ::grpc::Service::MarkMethodGeneric(7); } ~WithGenericMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1843,16 +1966,169 @@ class MilvusService final { template class WithGenericMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithGenericMethod_DropIndex() { - ::grpc::Service::MarkMethodGeneric(14); + ::grpc::Service::MarkMethodGeneric(8); } 
~WithGenericMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_CreatePartition() { + ::grpc::Service::MarkMethodGeneric(9); + } + ~WithGenericMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_ShowPartitions() { + ::grpc::Service::MarkMethodGeneric(10); + } + ~WithGenericMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_DropPartition() { + ::grpc::Service::MarkMethodGeneric(11); + } + ~WithGenericMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Insert() { + ::grpc::Service::MarkMethodGeneric(12); + } + ~WithGenericMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Search() { + ::grpc::Service::MarkMethodGeneric(13); + } + ~WithGenericMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, 
::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_SearchInFiles() { + ::grpc::Service::MarkMethodGeneric(14); + } + ~WithGenericMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_Cmd() { + ::grpc::Service::MarkMethodGeneric(15); + } + ~WithGenericMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_DeleteByDate() { + ::grpc::Service::MarkMethodGeneric(16); + } + ~WithGenericMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + }; + template + class WithGenericMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithGenericMethod_PreloadTable() { + ::grpc::Service::MarkMethodGeneric(17); + } + ~WithGenericMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1860,7 +2136,7 @@ class MilvusService final { template class WithRawMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_CreateTable() { ::grpc::Service::MarkMethodRaw(0); @@ -1869,7 +2145,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1880,7 +2156,7 @@ 
class MilvusService final { template class WithRawMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_HasTable() { ::grpc::Service::MarkMethodRaw(1); @@ -1889,7 +2165,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -1898,269 +2174,329 @@ class MilvusService final { } }; template - class WithRawMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_DropTable() { - ::grpc::Service::MarkMethodRaw(2); - } - ~WithRawMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestDropTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_CreateIndex() { - ::grpc::Service::MarkMethodRaw(3); - } - ~WithRawMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestCreateIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_Insert() { - ::grpc::Service::MarkMethodRaw(4); - } - ~WithRawMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestInsert(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, 
::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_Search() { - ::grpc::Service::MarkMethodRaw(5); - } - ~WithRawMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearch(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_SearchInFiles() { - ::grpc::Service::MarkMethodRaw(6); - } - ~WithRawMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestSearchInFiles(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template class WithRawMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_DescribeTable() { - ::grpc::Service::MarkMethodRaw(7); + ::grpc::Service::MarkMethodRaw(2); } ~WithRawMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const 
Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_CountTable() { - ::grpc::Service::MarkMethodRaw(8); + ::grpc::Service::MarkMethodRaw(3); } ~WithRawMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestCountTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(3, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_ShowTables() { - ::grpc::Service::MarkMethodRaw(9); + ::grpc::Service::MarkMethodRaw(4); } ~WithRawMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestShowTables(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithRawMethod_Cmd : public BaseClass { + class WithRawMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithRawMethod_Cmd() { - ::grpc::Service::MarkMethodRaw(10); + WithRawMethod_DropTable() { + ::grpc::Service::MarkMethodRaw(5); } - ~WithRawMethod_Cmd() override { + ~WithRawMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestCmd(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< 
::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + void RequestDropTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag); } }; template - class WithRawMethod_DeleteByRange : public BaseClass { + class WithRawMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithRawMethod_DeleteByRange() { - ::grpc::Service::MarkMethodRaw(11); + WithRawMethod_CreateIndex() { + ::grpc::Service::MarkMethodRaw(6); } - ~WithRawMethod_DeleteByRange() override { + ~WithRawMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - void RequestDeleteByRange(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); - } - }; - template - class WithRawMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithRawMethod_PreloadTable() { - ::grpc::Service::MarkMethodRaw(12); - } - ~WithRawMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - void RequestPreloadTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + void RequestCreateIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_DescribeIndex() { - 
::grpc::Service::MarkMethodRaw(13); + ::grpc::Service::MarkMethodRaw(7); } ~WithRawMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDescribeIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { - ::grpc::Service::RequestAsyncUnary(13, context, request, response, new_call_cq, notification_cq, tag); + ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag); } }; template class WithRawMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithRawMethod_DropIndex() { - ::grpc::Service::MarkMethodRaw(14); + ::grpc::Service::MarkMethodRaw(8); } ~WithRawMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } void RequestDropIndex(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(8, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_CreatePartition() { + ::grpc::Service::MarkMethodRaw(9); + } + ~WithRawMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCreatePartition(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(9, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_ShowPartitions() { + ::grpc::Service::MarkMethodRaw(10); + } + ~WithRawMethod_ShowPartitions() override { + 
BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestShowPartitions(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(10, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_DropPartition() { + ::grpc::Service::MarkMethodRaw(11); + } + ~WithRawMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDropPartition(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(11, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Insert() { + ::grpc::Service::MarkMethodRaw(12); + } + ~WithRawMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestInsert(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(12, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Search() { + ::grpc::Service::MarkMethodRaw(13); + } + ~WithRawMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearch(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(13, 
context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_SearchInFiles() { + ::grpc::Service::MarkMethodRaw(14); + } + ~WithRawMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestSearchInFiles(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { ::grpc::Service::RequestAsyncUnary(14, context, request, response, new_call_cq, notification_cq, tag); } }; template + class WithRawMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_Cmd() { + ::grpc::Service::MarkMethodRaw(15); + } + ~WithRawMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestCmd(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(15, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_DeleteByDate() { + ::grpc::Service::MarkMethodRaw(16); + } + ~WithRawMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestDeleteByDate(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(16, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template + class WithRawMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithRawMethod_PreloadTable() { + ::grpc::Service::MarkMethodRaw(17); + } + ~WithRawMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + void RequestPreloadTable(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { + ::grpc::Service::RequestAsyncUnary(17, context, request, response, new_call_cq, notification_cq, tag); + } + }; + template class ExperimentalWithRawCallbackMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_CreateTable() { ::grpc::Service::experimental().MarkMethodRawCallback(0, @@ -2176,16 +2512,16 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CreateTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CreateTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_HasTable() { ::grpc::Service::experimental().MarkMethodRawCallback(1, @@ -2201,144 +2537,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void HasTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_DropTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(2, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - 
this->DropTable(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void DropTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_CreateIndex() { - ::grpc::Service::experimental().MarkMethodRawCallback(3, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->CreateIndex(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void CreateIndex(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_Insert() { - ::grpc::Service::experimental().MarkMethodRawCallback(4, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->Insert(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Insert(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_Search() { - ::grpc::Service::experimental().MarkMethodRawCallback(5, - new 
::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->Search(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void Search(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_SearchInFiles() { - ::grpc::Service::experimental().MarkMethodRawCallback(6, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->SearchInFiles(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void SearchInFiles(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void HasTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_DescribeTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(7, + ::grpc::Service::experimental().MarkMethodRawCallback(2, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2351,19 +2562,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* 
/*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_CountTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(8, + ::grpc::Service::experimental().MarkMethodRawCallback(3, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2376,19 +2587,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void CountTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CountTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_ShowTables() { - ::grpc::Service::experimental().MarkMethodRawCallback(9, + ::grpc::Service::experimental().MarkMethodRawCallback(4, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2401,94 +2612,69 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void ShowTables(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* 
response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void ShowTables(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template - class ExperimentalWithRawCallbackMethod_Cmd : public BaseClass { + class ExperimentalWithRawCallbackMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - ExperimentalWithRawCallbackMethod_Cmd() { - ::grpc::Service::experimental().MarkMethodRawCallback(10, + ExperimentalWithRawCallbackMethod_DropTable() { + ::grpc::Service::experimental().MarkMethodRawCallback(5, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { - this->Cmd(context, request, response, controller); + this->DropTable(context, request, response, controller); })); } - ~ExperimentalWithRawCallbackMethod_Cmd() override { + ~ExperimentalWithRawCallbackMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void Cmd(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template - class ExperimentalWithRawCallbackMethod_DeleteByRange : public BaseClass { + class ExperimentalWithRawCallbackMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - ExperimentalWithRawCallbackMethod_DeleteByRange() { - ::grpc::Service::experimental().MarkMethodRawCallback(11, + ExperimentalWithRawCallbackMethod_CreateIndex() { + ::grpc::Service::experimental().MarkMethodRawCallback(6, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { - this->DeleteByRange(context, request, response, controller); + this->CreateIndex(context, request, response, controller); })); } - ~ExperimentalWithRawCallbackMethod_DeleteByRange() override { + ~ExperimentalWithRawCallbackMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // 
disable synchronous version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DeleteByRange(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } - }; - template - class ExperimentalWithRawCallbackMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - ExperimentalWithRawCallbackMethod_PreloadTable() { - ::grpc::Service::experimental().MarkMethodRawCallback(12, - new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( - [this](::grpc::ServerContext* context, - const ::grpc::ByteBuffer* request, - ::grpc::ByteBuffer* response, - ::grpc::experimental::ServerCallbackRpcController* controller) { - this->PreloadTable(context, request, response, controller); - })); - } - ~ExperimentalWithRawCallbackMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable synchronous version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - virtual void PreloadTable(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void CreateIndex(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_DescribeIndex() { - ::grpc::Service::experimental().MarkMethodRawCallback(13, + ::grpc::Service::experimental().MarkMethodRawCallback(7, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2501,19 +2687,19 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DescribeIndex(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, 
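// NOTE: every raw-callback wrapper in this section follows one pattern:
// MarkMethodRawCallback(idx, ...) registers a CallbackUnaryHandler whose
// lambda forwards the still-serialized ::grpc::ByteBuffer request/response
// pair to a virtual hook; the synchronous override is disabled (it calls
// abort(), since gRPC never routes to it once the method is marked
// raw-callback); and the default hook simply finishes the RPC with
// StatusCode::UNIMPLEMENTED until a subclass overrides it.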
::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DescribeIndex(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class ExperimentalWithRawCallbackMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: ExperimentalWithRawCallbackMethod_DropIndex() { - ::grpc::Service::experimental().MarkMethodRawCallback(14, + ::grpc::Service::experimental().MarkMethodRawCallback(8, new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( [this](::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, @@ -2526,16 +2712,241 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable synchronous version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } - virtual void DropIndex(::grpc::ServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + virtual void DropIndex(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_CreatePartition() { + ::grpc::Service::experimental().MarkMethodRawCallback(9, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->CreatePartition(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void CreatePartition(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* 
/*service*/) {} + public: + ExperimentalWithRawCallbackMethod_ShowPartitions() { + ::grpc::Service::experimental().MarkMethodRawCallback(10, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->ShowPartitions(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void ShowPartitions(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_DropPartition() { + ::grpc::Service::experimental().MarkMethodRawCallback(11, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->DropPartition(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void DropPartition(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Insert() { + ::grpc::Service::experimental().MarkMethodRawCallback(12, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->Insert(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void 
Insert(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Search() { + ::grpc::Service::experimental().MarkMethodRawCallback(13, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->Search(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Search(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_SearchInFiles() { + ::grpc::Service::experimental().MarkMethodRawCallback(14, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->SearchInFiles(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void SearchInFiles(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_Cmd() { + ::grpc::Service::experimental().MarkMethodRawCallback(15, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->Cmd(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_Cmd() 
override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void Cmd(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_DeleteByDate() { + ::grpc::Service::experimental().MarkMethodRawCallback(16, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->DeleteByDate(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void DeleteByDate(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } + }; + template + class ExperimentalWithRawCallbackMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + ExperimentalWithRawCallbackMethod_PreloadTable() { + ::grpc::Service::experimental().MarkMethodRawCallback(17, + new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>( + [this](::grpc::ServerContext* context, + const ::grpc::ByteBuffer* request, + ::grpc::ByteBuffer* response, + ::grpc::experimental::ServerCallbackRpcController* controller) { + this->PreloadTable(context, request, response, controller); + })); + } + ~ExperimentalWithRawCallbackMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable synchronous version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + virtual void PreloadTable(::grpc::ServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/, ::grpc::experimental::ServerCallbackRpcController* controller) { controller->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "")); } }; template class WithStreamedUnaryMethod_CreateTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_CreateTable() { 
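// NOTE: this patch reorders the whole MilvusService method table rather
// than appending to it: table methods come first (CreateTable 0,
// HasTable 1, DescribeTable 2, CountTable 3, ShowTables 4, DropTable 5),
// then index methods (CreateIndex 6, DescribeIndex 7, DropIndex 8), the
// new partition RPCs (CreatePartition 9, ShowPartitions 10,
// DropPartition 11), and the data-path methods (Insert 12, Search 13,
// SearchInFiles 14, Cmd 15, DeleteByDate 16 -- formerly DeleteByRange --
// and PreloadTable 17). Every ordinal passed to MarkMethodRawCallback or
// MarkMethodStreamed must match the method's position in milvus.proto,
// which is why each wrapper is re-registered even where its body is
// otherwise unchanged, and why the StreamedUnaryService typedef chain at
// the end of this header grows from 15 to 18 nested wrappers.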
::grpc::Service::MarkMethodStreamed(0, @@ -2545,7 +2956,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableSchema* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2555,7 +2966,7 @@ class MilvusService final { template class WithStreamedUnaryMethod_HasTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_HasTable() { ::grpc::Service::MarkMethodStreamed(1, @@ -2565,7 +2976,7 @@ class MilvusService final { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override { + ::grpc::Status HasTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::BoolReply* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2573,119 +2984,19 @@ class MilvusService final { virtual ::grpc::Status StreamedHasTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::BoolReply>* server_unary_streamer) = 0; }; template - class WithStreamedUnaryMethod_DropTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_DropTable() { - ::grpc::Service::MarkMethodStreamed(2, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropTable::StreamedDropTable, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_DropTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedDropTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_CreateIndex : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_CreateIndex() { - ::grpc::Service::MarkMethodStreamed(3, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_CreateIndex::StreamedCreateIndex, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_CreateIndex() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) override { - abort(); - return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedCreateIndex(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::IndexParam,::milvus::grpc::Status>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_Insert : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_Insert() { - ::grpc::Service::MarkMethodStreamed(4, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(std::bind(&WithStreamedUnaryMethod_Insert::StreamedInsert, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_Insert() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedInsert(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::InsertParam,::milvus::grpc::VectorIds>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_Search : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_Search() { - ::grpc::Service::MarkMethodStreamed(5, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_Search::StreamedSearch, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_Search() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedSearch(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::SearchParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_SearchInFiles : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_SearchInFiles() { - ::grpc::Service::MarkMethodStreamed(6, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_SearchInFiles::StreamedSearchInFiles, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_SearchInFiles() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedSearchInFiles(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< 
::milvus::grpc::SearchInFilesParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; - }; - template class WithStreamedUnaryMethod_DescribeTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_DescribeTable() { - ::grpc::Service::MarkMethodStreamed(7, + ::grpc::Service::MarkMethodStreamed(2, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>(std::bind(&WithStreamedUnaryMethod_DescribeTable::StreamedDescribeTable, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_DescribeTable() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override { + ::grpc::Status DescribeTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableSchema* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2695,17 +3006,17 @@ class MilvusService final { template class WithStreamedUnaryMethod_CountTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_CountTable() { - ::grpc::Service::MarkMethodStreamed(8, + ::grpc::Service::MarkMethodStreamed(3, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>(std::bind(&WithStreamedUnaryMethod_CountTable::StreamedCountTable, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_CountTable() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override { + ::grpc::Status CountTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::TableRowCount* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2715,17 +3026,17 @@ class MilvusService final { template class WithStreamedUnaryMethod_ShowTables : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_ShowTables() { - ::grpc::Service::MarkMethodStreamed(9, + ::grpc::Service::MarkMethodStreamed(4, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::TableNameList>(std::bind(&WithStreamedUnaryMethod_ShowTables::StreamedShowTables, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_ShowTables() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override { + ::grpc::Status ShowTables(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::TableNameList* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2733,79 +3044,59 @@ 
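// NOTE: the WithStreamedUnaryMethod_* wrappers that follow mirror the
// raw-callback ones under the same new numbering (the hunks below move
// DropTable from index 2 to 5 and CreateIndex from 3 to 6). Each wrapper
// swaps the normal unary handler for a StreamedUnaryHandler, aborts the
// now-unreachable synchronous override, and declares a pure-virtual
// Streamed<Name>() that receives a ServerUnaryStreamer<Request, Response>
// in place of decoded messages. A minimal sketch of a concrete override --
// illustrative only, not protoc output; MyStreamedService is a made-up
// name, and StreamedUnaryService requires overriding all 18 Streamed*
// hooks:
//
//   class MyStreamedService final
//       : public ::milvus::grpc::MilvusService::StreamedUnaryService {
//     ::grpc::Status StreamedDropTable(
//         ::grpc::ServerContext* /*context*/,
//         ::grpc::ServerUnaryStreamer<::milvus::grpc::TableName,
//                                     ::milvus::grpc::Status>* streamer) override {
//       ::milvus::grpc::TableName request;
//       ::milvus::grpc::Status response;
//       streamer->Read(&request);   // exactly one request for a unary RPC
//       streamer->Write(response);  // exactly one reply
//       return ::grpc::Status::OK;
//     }
//     // ...overrides for the other 17 Streamed* methods elided.
//   };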
class MilvusService final { virtual ::grpc::Status StreamedShowTables(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::Command,::milvus::grpc::TableNameList>* server_unary_streamer) = 0; }; template - class WithStreamedUnaryMethod_Cmd : public BaseClass { + class WithStreamedUnaryMethod_DropTable : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithStreamedUnaryMethod_Cmd() { - ::grpc::Service::MarkMethodStreamed(10, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>(std::bind(&WithStreamedUnaryMethod_Cmd::StreamedCmd, this, std::placeholders::_1, std::placeholders::_2))); + WithStreamedUnaryMethod_DropTable() { + ::grpc::Service::MarkMethodStreamed(5, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropTable::StreamedDropTable, this, std::placeholders::_1, std::placeholders::_2))); } - ~WithStreamedUnaryMethod_Cmd() override { + ~WithStreamedUnaryMethod_DropTable() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) override { + ::grpc::Status DropTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } // replace default version of method with streamed unary - virtual ::grpc::Status StreamedCmd(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::Command,::milvus::grpc::StringReply>* server_unary_streamer) = 0; + virtual ::grpc::Status StreamedDropTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; }; template - class WithStreamedUnaryMethod_DeleteByRange : public BaseClass { + class WithStreamedUnaryMethod_CreateIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: - WithStreamedUnaryMethod_DeleteByRange() { - ::grpc::Service::MarkMethodStreamed(11, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DeleteByRange::StreamedDeleteByRange, this, std::placeholders::_1, std::placeholders::_2))); + WithStreamedUnaryMethod_CreateIndex() { + ::grpc::Service::MarkMethodStreamed(6, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_CreateIndex::StreamedCreateIndex, this, std::placeholders::_1, std::placeholders::_2))); } - ~WithStreamedUnaryMethod_DeleteByRange() override { + ~WithStreamedUnaryMethod_CreateIndex() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) override { + ::grpc::Status CreateIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::IndexParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return 
::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } // replace default version of method with streamed unary - virtual ::grpc::Status StreamedDeleteByRange(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::DeleteByRangeParam,::milvus::grpc::Status>* server_unary_streamer) = 0; - }; - template - class WithStreamedUnaryMethod_PreloadTable : public BaseClass { - private: - void BaseClassMustBeDerivedFromService(const Service *service) {} - public: - WithStreamedUnaryMethod_PreloadTable() { - ::grpc::Service::MarkMethodStreamed(12, - new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_PreloadTable::StreamedPreloadTable, this, std::placeholders::_1, std::placeholders::_2))); - } - ~WithStreamedUnaryMethod_PreloadTable() override { - BaseClassMustBeDerivedFromService(this); - } - // disable regular version of this method - ::grpc::Status PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { - abort(); - return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); - } - // replace default version of method with streamed unary - virtual ::grpc::Status StreamedPreloadTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; + virtual ::grpc::Status StreamedCreateIndex(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::IndexParam,::milvus::grpc::Status>* server_unary_streamer) = 0; }; template class WithStreamedUnaryMethod_DescribeIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_DescribeIndex() { - ::grpc::Service::MarkMethodStreamed(13, + ::grpc::Service::MarkMethodStreamed(7, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>(std::bind(&WithStreamedUnaryMethod_DescribeIndex::StreamedDescribeIndex, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_DescribeIndex() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override { + ::grpc::Status DescribeIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::IndexParam* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } @@ -2815,26 +3106,206 @@ class MilvusService final { template class WithStreamedUnaryMethod_DropIndex : public BaseClass { private: - void BaseClassMustBeDerivedFromService(const Service *service) {} + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} public: WithStreamedUnaryMethod_DropIndex() { - ::grpc::Service::MarkMethodStreamed(14, + ::grpc::Service::MarkMethodStreamed(8, new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropIndex::StreamedDropIndex, this, std::placeholders::_1, std::placeholders::_2))); } ~WithStreamedUnaryMethod_DropIndex() override { BaseClassMustBeDerivedFromService(this); } // disable regular version of this method - ::grpc::Status DropIndex(::grpc::ServerContext* context, const 
::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override { + ::grpc::Status DropIndex(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { abort(); return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); } // replace default version of method with streamed unary virtual ::grpc::Status StreamedDropIndex(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; }; - typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > StreamedUnaryService; + template + class WithStreamedUnaryMethod_CreatePartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_CreatePartition() { + ::grpc::Service::MarkMethodStreamed(9, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_CreatePartition::StreamedCreatePartition, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_CreatePartition() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status CreatePartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedCreatePartition(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::PartitionParam,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_ShowPartitions : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_ShowPartitions() { + ::grpc::Service::MarkMethodStreamed(10, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>(std::bind(&WithStreamedUnaryMethod_ShowPartitions::StreamedShowPartitions, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_ShowPartitions() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status ShowPartitions(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::PartitionList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedShowPartitions(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::PartitionList>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_DropPartition : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_DropPartition() { + ::grpc::Service::MarkMethodStreamed(11, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DropPartition::StreamedDropPartition, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_DropPartition() override { + BaseClassMustBeDerivedFromService(this); + } + // 
disable regular version of this method + ::grpc::Status DropPartition(::grpc::ServerContext* /*context*/, const ::milvus::grpc::PartitionParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedDropPartition(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::PartitionParam,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Insert : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Insert() { + ::grpc::Service::MarkMethodStreamed(12, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(std::bind(&WithStreamedUnaryMethod_Insert::StreamedInsert, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Insert() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Insert(::grpc::ServerContext* /*context*/, const ::milvus::grpc::InsertParam* /*request*/, ::milvus::grpc::VectorIds* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedInsert(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::InsertParam,::milvus::grpc::VectorIds>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Search : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Search() { + ::grpc::Service::MarkMethodStreamed(13, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_Search::StreamedSearch, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Search() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Search(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchParam* /*request*/, ::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedSearch(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::SearchParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_SearchInFiles : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_SearchInFiles() { + ::grpc::Service::MarkMethodStreamed(14, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(std::bind(&WithStreamedUnaryMethod_SearchInFiles::StreamedSearchInFiles, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_SearchInFiles() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status SearchInFiles(::grpc::ServerContext* /*context*/, const ::milvus::grpc::SearchInFilesParam* /*request*/, 
::milvus::grpc::TopKQueryResultList* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedSearchInFiles(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::SearchInFilesParam,::milvus::grpc::TopKQueryResultList>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_Cmd : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_Cmd() { + ::grpc::Service::MarkMethodStreamed(15, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::Command, ::milvus::grpc::StringReply>(std::bind(&WithStreamedUnaryMethod_Cmd::StreamedCmd, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_Cmd() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status Cmd(::grpc::ServerContext* /*context*/, const ::milvus::grpc::Command* /*request*/, ::milvus::grpc::StringReply* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedCmd(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::Command,::milvus::grpc::StringReply>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_DeleteByDate : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_DeleteByDate() { + ::grpc::Service::MarkMethodStreamed(16, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_DeleteByDate::StreamedDeleteByDate, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_DeleteByDate() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status DeleteByDate(::grpc::ServerContext* /*context*/, const ::milvus::grpc::DeleteByDateParam* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual ::grpc::Status StreamedDeleteByDate(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::DeleteByDateParam,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + template + class WithStreamedUnaryMethod_PreloadTable : public BaseClass { + private: + void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} + public: + WithStreamedUnaryMethod_PreloadTable() { + ::grpc::Service::MarkMethodStreamed(17, + new ::grpc::internal::StreamedUnaryHandler< ::milvus::grpc::TableName, ::milvus::grpc::Status>(std::bind(&WithStreamedUnaryMethod_PreloadTable::StreamedPreloadTable, this, std::placeholders::_1, std::placeholders::_2))); + } + ~WithStreamedUnaryMethod_PreloadTable() override { + BaseClassMustBeDerivedFromService(this); + } + // disable regular version of this method + ::grpc::Status PreloadTable(::grpc::ServerContext* /*context*/, const ::milvus::grpc::TableName* /*request*/, ::milvus::grpc::Status* /*response*/) override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + // replace default version of method with streamed unary + virtual 
::grpc::Status StreamedPreloadTable(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::milvus::grpc::TableName,::milvus::grpc::Status>* server_unary_streamer) = 0; + }; + typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > > > > StreamedUnaryService; typedef Service SplitStreamedService; - typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > StreamedService; + typedef WithStreamedUnaryMethod_CreateTable > > > > > > > > > > > > > > > > > StreamedService; }; } // namespace grpc diff --git a/core/src/grpc/gen-milvus/milvus.pb.cc b/core/src/grpc/gen-milvus/milvus.pb.cc index fe416a4773..c381c4f4db 100644 --- a/core/src/grpc/gen-milvus/milvus.pb.cc +++ b/core/src/grpc/gen-milvus/milvus.pb.cc @@ -16,6 +16,7 @@ // @@protoc_insertion_point(includes) #include extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Index_milvus_2eproto; +extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_PartitionParam_milvus_2eproto; extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_QueryResult_milvus_2eproto; extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Range_milvus_2eproto; extern PROTOBUF_INTERNAL_EXPORT_milvus_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RowRecord_milvus_2eproto; @@ -28,6 +29,10 @@ class TableNameDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; } _TableName_default_instance_; +class PartitionNameDefaultTypeInternal { + public: + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _PartitionName_default_instance_; class TableNameListDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; @@ -36,6 +41,14 @@ class TableSchemaDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; } _TableSchema_default_instance_; +class PartitionParamDefaultTypeInternal { + public: + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _PartitionParam_default_instance_; +class PartitionListDefaultTypeInternal { + public: + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _PartitionList_default_instance_; class RangeDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; @@ -96,10 +109,10 @@ class IndexParamDefaultTypeInternal { public: ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; } _IndexParam_default_instance_; -class DeleteByRangeParamDefaultTypeInternal { +class DeleteByDateParamDefaultTypeInternal { public: - ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; -} _DeleteByRangeParam_default_instance_; + ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed _instance; +} _DeleteByDateParam_default_instance_; } // namespace grpc } // namespace milvus static void InitDefaultsscc_info_BoolReply_milvus_2eproto() { @@ -131,19 +144,19 @@ static void InitDefaultsscc_info_Command_milvus_2eproto() { ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_Command_milvus_2eproto = {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_Command_milvus_2eproto}, {}}; -static void InitDefaultsscc_info_DeleteByRangeParam_milvus_2eproto() { +static void InitDefaultsscc_info_DeleteByDateParam_milvus_2eproto() { 
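// NOTE: on the serialization side (milvus.pb.cc), every message type gets
// an ExplicitlyConstructed default instance plus an InitDefaults/SCCInfo
// pair, so the DeleteByRange -> DeleteByDate rename touches all three
// (DeleteByDateParam keeps its SCCInfo<1> dependency on Range). The new
// partition messages register their dependencies explicitly: PartitionName
// and PartitionParam have none (SCCInfo<0>), while PartitionList is
// SCCInfo<2> because its fields reference Status and repeated
// PartitionParam, whose defaults must be constructed first.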
GOOGLE_PROTOBUF_VERIFY_VERSION; { - void* ptr = &::milvus::grpc::_DeleteByRangeParam_default_instance_; - new (ptr) ::milvus::grpc::DeleteByRangeParam(); + void* ptr = &::milvus::grpc::_DeleteByDateParam_default_instance_; + new (ptr) ::milvus::grpc::DeleteByDateParam(); ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); } - ::milvus::grpc::DeleteByRangeParam::InitAsDefaultInstance(); + ::milvus::grpc::DeleteByDateParam::InitAsDefaultInstance(); } -::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DeleteByRangeParam_milvus_2eproto = - {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_DeleteByRangeParam_milvus_2eproto}, { +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DeleteByDateParam_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_DeleteByDateParam_milvus_2eproto}, { &scc_info_Range_milvus_2eproto.base,}}; static void InitDefaultsscc_info_Index_milvus_2eproto() { @@ -191,6 +204,50 @@ static void InitDefaultsscc_info_InsertParam_milvus_2eproto() { {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_InsertParam_milvus_2eproto}, { &scc_info_RowRecord_milvus_2eproto.base,}}; +static void InitDefaultsscc_info_PartitionList_milvus_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::milvus::grpc::_PartitionList_default_instance_; + new (ptr) ::milvus::grpc::PartitionList(); + ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); + } + ::milvus::grpc::PartitionList::InitAsDefaultInstance(); +} + +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_PartitionList_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsscc_info_PartitionList_milvus_2eproto}, { + &scc_info_Status_status_2eproto.base, + &scc_info_PartitionParam_milvus_2eproto.base,}}; + +static void InitDefaultsscc_info_PartitionName_milvus_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::milvus::grpc::_PartitionName_default_instance_; + new (ptr) ::milvus::grpc::PartitionName(); + ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); + } + ::milvus::grpc::PartitionName::InitAsDefaultInstance(); +} + +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_PartitionName_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_PartitionName_milvus_2eproto}, {}}; + +static void InitDefaultsscc_info_PartitionParam_milvus_2eproto() { + GOOGLE_PROTOBUF_VERIFY_VERSION; + + { + void* ptr = &::milvus::grpc::_PartitionParam_default_instance_; + new (ptr) ::milvus::grpc::PartitionParam(); + ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); + } + ::milvus::grpc::PartitionParam::InitAsDefaultInstance(); +} + +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_PartitionParam_milvus_2eproto = + {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_PartitionParam_milvus_2eproto}, {}}; + static void InitDefaultsscc_info_QueryResult_milvus_2eproto() { GOOGLE_PROTOBUF_VERIFY_VERSION; @@ -384,7 +441,7 @@ static void InitDefaultsscc_info_VectorIds_milvus_2eproto() { {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_VectorIds_milvus_2eproto}, { &scc_info_Status_status_2eproto.base,}}; -static 
::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_milvus_2eproto[19]; +static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_milvus_2eproto[22]; static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_milvus_2eproto = nullptr; static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_milvus_2eproto = nullptr; @@ -396,6 +453,12 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT ~0u, // no _weak_field_map_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableName, table_name_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionName, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionName, partition_name_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableNameList, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -413,6 +476,21 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableSchema, index_file_size_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::TableSchema, metric_type_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, table_name_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, partition_name_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionParam, tag_), + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionList, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionList, status_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::PartitionList, partition_array_), + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::Range, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ @@ -433,6 +511,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, table_name_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, row_record_array_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, row_id_array_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::InsertParam, partition_tag_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::VectorIds, _internal_metadata_), ~0u, // no _extensions_ @@ -450,6 +529,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, query_range_array_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, topk_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, nprobe_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchParam, partition_tag_array_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::milvus::grpc::SearchInFilesParam, _internal_metadata_), ~0u, // no _extensions_ @@ -520,39 +600,45 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT PROTOBUF_FIELD_OFFSET(::milvus::grpc::IndexParam, table_name_), PROTOBUF_FIELD_OFFSET(::milvus::grpc::IndexParam, index_), ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByRangeParam, _internal_metadata_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByDateParam, _internal_metadata_), ~0u, // no _extensions_ ~0u, // no _oneof_case_ ~0u, // no _weak_field_map_ 
- PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByRangeParam, range_), - PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByRangeParam, table_name_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByDateParam, range_), + PROTOBUF_FIELD_OFFSET(::milvus::grpc::DeleteByDateParam, table_name_), }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { { 0, -1, sizeof(::milvus::grpc::TableName)}, - { 6, -1, sizeof(::milvus::grpc::TableNameList)}, - { 13, -1, sizeof(::milvus::grpc::TableSchema)}, - { 23, -1, sizeof(::milvus::grpc::Range)}, - { 30, -1, sizeof(::milvus::grpc::RowRecord)}, - { 36, -1, sizeof(::milvus::grpc::InsertParam)}, - { 44, -1, sizeof(::milvus::grpc::VectorIds)}, - { 51, -1, sizeof(::milvus::grpc::SearchParam)}, - { 61, -1, sizeof(::milvus::grpc::SearchInFilesParam)}, - { 68, -1, sizeof(::milvus::grpc::QueryResult)}, - { 75, -1, sizeof(::milvus::grpc::TopKQueryResult)}, - { 81, -1, sizeof(::milvus::grpc::TopKQueryResultList)}, - { 88, -1, sizeof(::milvus::grpc::StringReply)}, - { 95, -1, sizeof(::milvus::grpc::BoolReply)}, - { 102, -1, sizeof(::milvus::grpc::TableRowCount)}, - { 109, -1, sizeof(::milvus::grpc::Command)}, - { 115, -1, sizeof(::milvus::grpc::Index)}, - { 122, -1, sizeof(::milvus::grpc::IndexParam)}, - { 130, -1, sizeof(::milvus::grpc::DeleteByRangeParam)}, + { 6, -1, sizeof(::milvus::grpc::PartitionName)}, + { 12, -1, sizeof(::milvus::grpc::TableNameList)}, + { 19, -1, sizeof(::milvus::grpc::TableSchema)}, + { 29, -1, sizeof(::milvus::grpc::PartitionParam)}, + { 37, -1, sizeof(::milvus::grpc::PartitionList)}, + { 44, -1, sizeof(::milvus::grpc::Range)}, + { 51, -1, sizeof(::milvus::grpc::RowRecord)}, + { 57, -1, sizeof(::milvus::grpc::InsertParam)}, + { 66, -1, sizeof(::milvus::grpc::VectorIds)}, + { 73, -1, sizeof(::milvus::grpc::SearchParam)}, + { 84, -1, sizeof(::milvus::grpc::SearchInFilesParam)}, + { 91, -1, sizeof(::milvus::grpc::QueryResult)}, + { 98, -1, sizeof(::milvus::grpc::TopKQueryResult)}, + { 104, -1, sizeof(::milvus::grpc::TopKQueryResultList)}, + { 111, -1, sizeof(::milvus::grpc::StringReply)}, + { 118, -1, sizeof(::milvus::grpc::BoolReply)}, + { 125, -1, sizeof(::milvus::grpc::TableRowCount)}, + { 132, -1, sizeof(::milvus::grpc::Command)}, + { 138, -1, sizeof(::milvus::grpc::Index)}, + { 145, -1, sizeof(::milvus::grpc::IndexParam)}, + { 153, -1, sizeof(::milvus::grpc::DeleteByDateParam)}, }; static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { reinterpret_cast(&::milvus::grpc::_TableName_default_instance_), + reinterpret_cast(&::milvus::grpc::_PartitionName_default_instance_), reinterpret_cast(&::milvus::grpc::_TableNameList_default_instance_), reinterpret_cast(&::milvus::grpc::_TableSchema_default_instance_), + reinterpret_cast(&::milvus::grpc::_PartitionParam_default_instance_), + reinterpret_cast(&::milvus::grpc::_PartitionList_default_instance_), reinterpret_cast(&::milvus::grpc::_Range_default_instance_), reinterpret_cast(&::milvus::grpc::_RowRecord_default_instance_), reinterpret_cast(&::milvus::grpc::_InsertParam_default_instance_), @@ -568,85 +654,100 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = reinterpret_cast(&::milvus::grpc::_Command_default_instance_), reinterpret_cast(&::milvus::grpc::_Index_default_instance_), reinterpret_cast(&::milvus::grpc::_IndexParam_default_instance_), - reinterpret_cast(&::milvus::grpc::_DeleteByRangeParam_default_instance_), + 
reinterpret_cast(&::milvus::grpc::_DeleteByDateParam_default_instance_), }; const char descriptor_table_protodef_milvus_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = "\n\014milvus.proto\022\013milvus.grpc\032\014status.prot" - "o\"\037\n\tTableName\022\022\n\ntable_name\030\001 \001(\t\"I\n\rTa" - "bleNameList\022#\n\006status\030\001 \001(\0132\023.milvus.grp" - "c.Status\022\023\n\013table_names\030\002 \003(\t\"\207\001\n\013TableS" - "chema\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.Stat" - "us\022\022\n\ntable_name\030\002 \001(\t\022\021\n\tdimension\030\003 \001(" - "\003\022\027\n\017index_file_size\030\004 \001(\003\022\023\n\013metric_typ" - "e\030\005 \001(\005\"/\n\005Range\022\023\n\013start_value\030\001 \001(\t\022\021\n" - "\tend_value\030\002 \001(\t\" \n\tRowRecord\022\023\n\013vector_" - "data\030\001 \003(\002\"i\n\013InsertParam\022\022\n\ntable_name\030" - "\001 \001(\t\0220\n\020row_record_array\030\002 \003(\0132\026.milvus" - ".grpc.RowRecord\022\024\n\014row_id_array\030\003 \003(\003\"I\n" - "\tVectorIds\022#\n\006status\030\001 \001(\0132\023.milvus.grpc" - ".Status\022\027\n\017vector_id_array\030\002 \003(\003\"\242\001\n\013Sea" - "rchParam\022\022\n\ntable_name\030\001 \001(\t\0222\n\022query_re" + "o\"\037\n\tTableName\022\022\n\ntable_name\030\001 \001(\t\"\'\n\rPa" + "rtitionName\022\026\n\016partition_name\030\001 \001(\t\"I\n\rT" + "ableNameList\022#\n\006status\030\001 \001(\0132\023.milvus.gr" + "pc.Status\022\023\n\013table_names\030\002 \003(\t\"\207\001\n\013Table" + "Schema\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.Sta" + "tus\022\022\n\ntable_name\030\002 \001(\t\022\021\n\tdimension\030\003 \001" + "(\003\022\027\n\017index_file_size\030\004 \001(\003\022\023\n\013metric_ty" + "pe\030\005 \001(\005\"I\n\016PartitionParam\022\022\n\ntable_name" + "\030\001 \001(\t\022\026\n\016partition_name\030\002 \001(\t\022\013\n\003tag\030\003 " + "\001(\t\"j\n\rPartitionList\022#\n\006status\030\001 \001(\0132\023.m" + "ilvus.grpc.Status\0224\n\017partition_array\030\002 \003" + "(\0132\033.milvus.grpc.PartitionParam\"/\n\005Range" + "\022\023\n\013start_value\030\001 \001(\t\022\021\n\tend_value\030\002 \001(\t" + "\" \n\tRowRecord\022\023\n\013vector_data\030\001 \003(\002\"\200\001\n\013I" + "nsertParam\022\022\n\ntable_name\030\001 \001(\t\0220\n\020row_re" "cord_array\030\002 \003(\0132\026.milvus.grpc.RowRecord" - "\022-\n\021query_range_array\030\003 \003(\0132\022.milvus.grp" - "c.Range\022\014\n\004topk\030\004 \001(\003\022\016\n\006nprobe\030\005 \001(\003\"[\n" - "\022SearchInFilesParam\022\025\n\rfile_id_array\030\001 \003" - "(\t\022.\n\014search_param\030\002 \001(\0132\030.milvus.grpc.S" - "earchParam\"+\n\013QueryResult\022\n\n\002id\030\001 \001(\003\022\020\n" - "\010distance\030\002 \001(\001\"H\n\017TopKQueryResult\0225\n\023qu" - "ery_result_arrays\030\001 \003(\0132\030.milvus.grpc.Qu" - "eryResult\"s\n\023TopKQueryResultList\022#\n\006stat" - "us\030\001 \001(\0132\023.milvus.grpc.Status\0227\n\021topk_qu" - "ery_result\030\002 \003(\0132\034.milvus.grpc.TopKQuery" - "Result\"H\n\013StringReply\022#\n\006status\030\001 \001(\0132\023." 
- "milvus.grpc.Status\022\024\n\014string_reply\030\002 \001(\t" - "\"D\n\tBoolReply\022#\n\006status\030\001 \001(\0132\023.milvus.g" - "rpc.Status\022\022\n\nbool_reply\030\002 \001(\010\"M\n\rTableR" - "owCount\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.St" - "atus\022\027\n\017table_row_count\030\002 \001(\003\"\026\n\007Command" - "\022\013\n\003cmd\030\001 \001(\t\"*\n\005Index\022\022\n\nindex_type\030\001 \001" - "(\005\022\r\n\005nlist\030\002 \001(\005\"h\n\nIndexParam\022#\n\006statu" - "s\030\001 \001(\0132\023.milvus.grpc.Status\022\022\n\ntable_na" - "me\030\002 \001(\t\022!\n\005index\030\003 \001(\0132\022.milvus.grpc.In" - "dex\"K\n\022DeleteByRangeParam\022!\n\005range\030\001 \001(\013" - "2\022.milvus.grpc.Range\022\022\n\ntable_name\030\002 \001(\t" - "2\360\007\n\rMilvusService\022>\n\013CreateTable\022\030.milv" - "us.grpc.TableSchema\032\023.milvus.grpc.Status" - "\"\000\022<\n\010HasTable\022\026.milvus.grpc.TableName\032\026" - ".milvus.grpc.BoolReply\"\000\022:\n\tDropTable\022\026." - "milvus.grpc.TableName\032\023.milvus.grpc.Stat" - "us\"\000\022=\n\013CreateIndex\022\027.milvus.grpc.IndexP" - "aram\032\023.milvus.grpc.Status\"\000\022<\n\006Insert\022\030." - "milvus.grpc.InsertParam\032\026.milvus.grpc.Ve" - "ctorIds\"\000\022F\n\006Search\022\030.milvus.grpc.Search" - "Param\032 .milvus.grpc.TopKQueryResultList\"" - "\000\022T\n\rSearchInFiles\022\037.milvus.grpc.SearchI" - "nFilesParam\032 .milvus.grpc.TopKQueryResul" - "tList\"\000\022C\n\rDescribeTable\022\026.milvus.grpc.T" - "ableName\032\030.milvus.grpc.TableSchema\"\000\022B\n\n" - "CountTable\022\026.milvus.grpc.TableName\032\032.mil" - "vus.grpc.TableRowCount\"\000\022@\n\nShowTables\022\024" - ".milvus.grpc.Command\032\032.milvus.grpc.Table" - "NameList\"\000\0227\n\003Cmd\022\024.milvus.grpc.Command\032" - "\030.milvus.grpc.StringReply\"\000\022G\n\rDeleteByR" - "ange\022\037.milvus.grpc.DeleteByRangeParam\032\023." 
- "milvus.grpc.Status\"\000\022=\n\014PreloadTable\022\026.m" + "\022\024\n\014row_id_array\030\003 \003(\003\022\025\n\rpartition_tag\030" + "\004 \001(\t\"I\n\tVectorIds\022#\n\006status\030\001 \001(\0132\023.mil" + "vus.grpc.Status\022\027\n\017vector_id_array\030\002 \003(\003" + "\"\277\001\n\013SearchParam\022\022\n\ntable_name\030\001 \001(\t\0222\n\022" + "query_record_array\030\002 \003(\0132\026.milvus.grpc.R" + "owRecord\022-\n\021query_range_array\030\003 \003(\0132\022.mi" + "lvus.grpc.Range\022\014\n\004topk\030\004 \001(\003\022\016\n\006nprobe\030" + "\005 \001(\003\022\033\n\023partition_tag_array\030\006 \003(\t\"[\n\022Se" + "archInFilesParam\022\025\n\rfile_id_array\030\001 \003(\t\022" + ".\n\014search_param\030\002 \001(\0132\030.milvus.grpc.Sear" + "chParam\"+\n\013QueryResult\022\n\n\002id\030\001 \001(\003\022\020\n\010di" + "stance\030\002 \001(\001\"H\n\017TopKQueryResult\0225\n\023query" + "_result_arrays\030\001 \003(\0132\030.milvus.grpc.Query" + "Result\"s\n\023TopKQueryResultList\022#\n\006status\030" + "\001 \001(\0132\023.milvus.grpc.Status\0227\n\021topk_query" + "_result\030\002 \003(\0132\034.milvus.grpc.TopKQueryRes" + "ult\"H\n\013StringReply\022#\n\006status\030\001 \001(\0132\023.mil" + "vus.grpc.Status\022\024\n\014string_reply\030\002 \001(\t\"D\n" + "\tBoolReply\022#\n\006status\030\001 \001(\0132\023.milvus.grpc" + ".Status\022\022\n\nbool_reply\030\002 \001(\010\"M\n\rTableRowC" + "ount\022#\n\006status\030\001 \001(\0132\023.milvus.grpc.Statu" + "s\022\027\n\017table_row_count\030\002 \001(\003\"\026\n\007Command\022\013\n" + "\003cmd\030\001 \001(\t\"*\n\005Index\022\022\n\nindex_type\030\001 \001(\005\022" + "\r\n\005nlist\030\002 \001(\005\"h\n\nIndexParam\022#\n\006status\030\001" + " \001(\0132\023.milvus.grpc.Status\022\022\n\ntable_name\030" + "\002 \001(\t\022!\n\005index\030\003 \001(\0132\022.milvus.grpc.Index" + "\"J\n\021DeleteByDateParam\022!\n\005range\030\001 \001(\0132\022.m" + "ilvus.grpc.Range\022\022\n\ntable_name\030\002 \001(\t2\302\t\n" + "\rMilvusService\022>\n\013CreateTable\022\030.milvus.g" + "rpc.TableSchema\032\023.milvus.grpc.Status\"\000\022<" + "\n\010HasTable\022\026.milvus.grpc.TableName\032\026.mil" + "vus.grpc.BoolReply\"\000\022C\n\rDescribeTable\022\026." 
+ "milvus.grpc.TableName\032\030.milvus.grpc.Tabl" + "eSchema\"\000\022B\n\nCountTable\022\026.milvus.grpc.Ta" + "bleName\032\032.milvus.grpc.TableRowCount\"\000\022@\n" + "\nShowTables\022\024.milvus.grpc.Command\032\032.milv" + "us.grpc.TableNameList\"\000\022:\n\tDropTable\022\026.m" "ilvus.grpc.TableName\032\023.milvus.grpc.Statu" - "s\"\000\022B\n\rDescribeIndex\022\026.milvus.grpc.Table" - "Name\032\027.milvus.grpc.IndexParam\"\000\022:\n\tDropI" - "ndex\022\026.milvus.grpc.TableName\032\023.milvus.gr" - "pc.Status\"\000b\006proto3" + "s\"\000\022=\n\013CreateIndex\022\027.milvus.grpc.IndexPa" + "ram\032\023.milvus.grpc.Status\"\000\022B\n\rDescribeIn" + "dex\022\026.milvus.grpc.TableName\032\027.milvus.grp" + "c.IndexParam\"\000\022:\n\tDropIndex\022\026.milvus.grp" + "c.TableName\032\023.milvus.grpc.Status\"\000\022E\n\017Cr" + "eatePartition\022\033.milvus.grpc.PartitionPar" + "am\032\023.milvus.grpc.Status\"\000\022F\n\016ShowPartiti" + "ons\022\026.milvus.grpc.TableName\032\032.milvus.grp" + "c.PartitionList\"\000\022C\n\rDropPartition\022\033.mil" + "vus.grpc.PartitionParam\032\023.milvus.grpc.St" + "atus\"\000\022<\n\006Insert\022\030.milvus.grpc.InsertPar" + "am\032\026.milvus.grpc.VectorIds\"\000\022F\n\006Search\022\030" + ".milvus.grpc.SearchParam\032 .milvus.grpc.T" + "opKQueryResultList\"\000\022T\n\rSearchInFiles\022\037." + "milvus.grpc.SearchInFilesParam\032 .milvus." + "grpc.TopKQueryResultList\"\000\0227\n\003Cmd\022\024.milv" + "us.grpc.Command\032\030.milvus.grpc.StringRepl" + "y\"\000\022E\n\014DeleteByDate\022\036.milvus.grpc.Delete" + "ByDateParam\032\023.milvus.grpc.Status\"\000\022=\n\014Pr" + "eloadTable\022\026.milvus.grpc.TableName\032\023.mil" + "vus.grpc.Status\"\000b\006proto3" ; static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_milvus_2eproto_deps[1] = { &::descriptor_table_status_2eproto, }; -static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_milvus_2eproto_sccs[19] = { +static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_milvus_2eproto_sccs[22] = { &scc_info_BoolReply_milvus_2eproto.base, &scc_info_Command_milvus_2eproto.base, - &scc_info_DeleteByRangeParam_milvus_2eproto.base, + &scc_info_DeleteByDateParam_milvus_2eproto.base, &scc_info_Index_milvus_2eproto.base, &scc_info_IndexParam_milvus_2eproto.base, &scc_info_InsertParam_milvus_2eproto.base, + &scc_info_PartitionList_milvus_2eproto.base, + &scc_info_PartitionName_milvus_2eproto.base, + &scc_info_PartitionParam_milvus_2eproto.base, &scc_info_QueryResult_milvus_2eproto.base, &scc_info_Range_milvus_2eproto.base, &scc_info_RowRecord_milvus_2eproto.base, @@ -664,10 +765,10 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_mil static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_milvus_2eproto_once; static bool descriptor_table_milvus_2eproto_initialized = false; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_milvus_2eproto = { - &descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 2539, - &descriptor_table_milvus_2eproto_once, descriptor_table_milvus_2eproto_sccs, descriptor_table_milvus_2eproto_deps, 19, 1, + &descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 3025, + &descriptor_table_milvus_2eproto_once, descriptor_table_milvus_2eproto_sccs, descriptor_table_milvus_2eproto_deps, 22, 1, schemas, file_default_instances, 
TableStruct_milvus_2eproto::offsets, - file_level_metadata_milvus_2eproto, 19, file_level_enum_descriptors_milvus_2eproto, file_level_service_descriptors_milvus_2eproto, + file_level_metadata_milvus_2eproto, 22, file_level_enum_descriptors_milvus_2eproto, file_level_service_descriptors_milvus_2eproto, }; // Force running AddDescriptors() at dynamic initialization time. @@ -944,6 +1045,275 @@ void TableName::InternalSwap(TableName* other) { } +// =================================================================== + +void PartitionName::InitAsDefaultInstance() { +} +class PartitionName::_Internal { + public: +}; + +PartitionName::PartitionName() + : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:milvus.grpc.PartitionName) +} +PartitionName::PartitionName(const PartitionName& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.partition_name().empty()) { + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } + // @@protoc_insertion_point(copy_constructor:milvus.grpc.PartitionName) +} + +void PartitionName::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_PartitionName_milvus_2eproto.base); + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +PartitionName::~PartitionName() { + // @@protoc_insertion_point(destructor:milvus.grpc.PartitionName) + SharedDtor(); +} + +void PartitionName::SharedDtor() { + partition_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void PartitionName::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const PartitionName& PartitionName::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_PartitionName_milvus_2eproto.base); + return *internal_default_instance(); +} + + +void PartitionName::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* PartitionName::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + CHK_(ptr); + switch (tag >> 3) { + // string partition_name = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_partition_name(), ptr, ctx, "milvus.grpc.PartitionName.partition_name"); + CHK_(ptr); + } else goto handle_unusual; + continue; + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->SetLastTag(tag); + goto success; + } + ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx); + CHK_(ptr != nullptr); + continue; + } + } // switch + } // while +success: + return ptr; +failure: + ptr = nullptr; 
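+  // Any CHK_ failure jumps here: ptr is nulled out and control rejoins the
+  // success path, so callers detect a malformed PartitionName payload by the
+  // nullptr return value rather than by an exception.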
+ goto success; +#undef CHK_ +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool PartitionName::MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + // @@protoc_insertion_point(parse_start:milvus.grpc.PartitionName) + for (;;) { + ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // string partition_name = 1; + case 1: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_partition_name())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionName.partition_name")); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:milvus.grpc.PartitionName) + return true; +failure: + // @@protoc_insertion_point(parse_failure:milvus.grpc.PartitionName) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void PartitionName::SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string partition_name = 1; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionName.partition_name"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 1, this->partition_name(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:milvus.grpc.PartitionName) +} + +::PROTOBUF_NAMESPACE_ID::uint8* PartitionName::InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string partition_name = 1; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionName.partition_name"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 1, this->partition_name(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( + 
_internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.PartitionName) + return target; +} + +size_t PartitionName::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.PartitionName) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string partition_name = 1; + if (this->partition_name().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_name()); + } + + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void PartitionName::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.PartitionName) + GOOGLE_DCHECK_NE(&from, this); + const PartitionName* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.PartitionName) + ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.PartitionName) + MergeFrom(*source); + } +} + +void PartitionName::MergeFrom(const PartitionName& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.PartitionName) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.partition_name().size() > 0) { + + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } +} + +void PartitionName::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.PartitionName) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void PartitionName::CopyFrom(const PartitionName& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.PartitionName) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PartitionName::IsInitialized() const { + return true; +} + +void PartitionName::InternalSwap(PartitionName* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + partition_name_.Swap(&other->partition_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PartitionName::GetMetadata() const { + return GetMetadataStatic(); +} + + // =================================================================== void TableNameList::InitAsDefaultInstance() { @@ -1742,6 +2112,728 @@ void TableSchema::InternalSwap(TableSchema* other) { } +// =================================================================== + +void PartitionParam::InitAsDefaultInstance() { +} +class PartitionParam::_Internal { + public: +}; + +PartitionParam::PartitionParam() + : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:milvus.grpc.PartitionParam) +} 
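+// Caller-side sketch, not protoc output: PartitionParam carries the three
+// proto3 string fields encoded in the descriptor above (table_name = 1,
+// partition_name = 2, tag = 3), and this change also adds partition_tag = 4
+// to InsertParam. The helper below and its literal values are hypothetical;
+// only the set_* and accessor calls are the ones protoc generates for these
+// string fields.
+static InsertParam ExampleInsertIntoPartition() {
+  PartitionParam partition;
+  partition.set_table_name("test_table");        // table that owns the partition
+  partition.set_partition_name("partition_01");  // partition to create
+  partition.set_tag("2019-09");                  // tag later used for routing
+
+  InsertParam insert;
+  insert.set_table_name(partition.table_name());
+  insert.set_partition_tag(partition.tag());     // route rows to that partition
+  return insert;
+}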
+PartitionParam::PartitionParam(const PartitionParam& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + _internal_metadata_(nullptr) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.table_name().empty()) { + table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); + } + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.partition_name().empty()) { + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } + tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.tag().empty()) { + tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.tag_); + } + // @@protoc_insertion_point(copy_constructor:milvus.grpc.PartitionParam) +} + +void PartitionParam::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_PartitionParam_milvus_2eproto.base); + table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +PartitionParam::~PartitionParam() { + // @@protoc_insertion_point(destructor:milvus.grpc.PartitionParam) + SharedDtor(); +} + +void PartitionParam::SharedDtor() { + table_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + tag_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} + +void PartitionParam::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const PartitionParam& PartitionParam::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_PartitionParam_milvus_2eproto.base); + return *internal_default_instance(); +} + + +void PartitionParam::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* PartitionParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + CHK_(ptr); + switch (tag >> 3) { + // string table_name = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_table_name(), ptr, ctx, "milvus.grpc.PartitionParam.table_name"); + CHK_(ptr); + } else goto handle_unusual; + continue; + // string partition_name = 2; + 
case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_partition_name(), ptr, ctx, "milvus.grpc.PartitionParam.partition_name"); + CHK_(ptr); + } else goto handle_unusual; + continue; + // string tag = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_tag(), ptr, ctx, "milvus.grpc.PartitionParam.tag"); + CHK_(ptr); + } else goto handle_unusual; + continue; + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->SetLastTag(tag); + goto success; + } + ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx); + CHK_(ptr != nullptr); + continue; + } + } // switch + } // while +success: + return ptr; +failure: + ptr = nullptr; + goto success; +#undef CHK_ +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool PartitionParam::MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + // @@protoc_insertion_point(parse_start:milvus.grpc.PartitionParam) + for (;;) { + ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // string table_name = 1; + case 1: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_table_name())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->table_name().data(), static_cast(this->table_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionParam.table_name")); + } else { + goto handle_unusual; + } + break; + } + + // string partition_name = 2; + case 2: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_partition_name())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionParam.partition_name")); + } else { + goto handle_unusual; + } + break; + } + + // string tag = 3; + case 3: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_tag())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->tag().data(), static_cast(this->tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.PartitionParam.tag")); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:milvus.grpc.PartitionParam) + return true; +failure: + // @@protoc_insertion_point(parse_failure:milvus.grpc.PartitionParam) + return false; +#undef DO_ +} +#endif // 
GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void PartitionParam::SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string table_name = 1; + if (this->table_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->table_name().data(), static_cast(this->table_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.table_name"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 1, this->table_name(), output); + } + + // string partition_name = 2; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.partition_name"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 2, this->partition_name(), output); + } + + // string tag = 3; + if (this->tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->tag().data(), static_cast(this->tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.tag"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 3, this->tag(), output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:milvus.grpc.PartitionParam) +} + +::PROTOBUF_NAMESPACE_ID::uint8* PartitionParam::InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // string table_name = 1; + if (this->table_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->table_name().data(), static_cast(this->table_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.table_name"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 1, this->table_name(), target); + } + + // string partition_name = 2; + if (this->partition_name().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_name().data(), static_cast(this->partition_name().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.partition_name"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 2, this->partition_name(), target); + } + + // string tag = 3; + if (this->tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->tag().data(), static_cast(this->tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.PartitionParam.tag"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 3, this->tag(), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = 
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.PartitionParam) + return target; +} + +size_t PartitionParam::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.PartitionParam) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // string table_name = 1; + if (this->table_name().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->table_name()); + } + + // string partition_name = 2; + if (this->partition_name().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_name()); + } + + // string tag = 3; + if (this->tag().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->tag()); + } + + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void PartitionParam::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.PartitionParam) + GOOGLE_DCHECK_NE(&from, this); + const PartitionParam* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.PartitionParam) + ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.PartitionParam) + MergeFrom(*source); + } +} + +void PartitionParam::MergeFrom(const PartitionParam& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.PartitionParam) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + if (from.table_name().size() > 0) { + + table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); + } + if (from.partition_name().size() > 0) { + + partition_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_name_); + } + if (from.tag().size() > 0) { + + tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.tag_); + } +} + +void PartitionParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.PartitionParam) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void PartitionParam::CopyFrom(const PartitionParam& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.PartitionParam) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PartitionParam::IsInitialized() const { + return true; +} + +void PartitionParam::InternalSwap(PartitionParam* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + table_name_.Swap(&other->table_name_, 
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); + partition_name_.Swap(&other->partition_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); + tag_.Swap(&other->tag_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PartitionParam::GetMetadata() const { + return GetMetadataStatic(); +} + + +// =================================================================== + +void PartitionList::InitAsDefaultInstance() { + ::milvus::grpc::_PartitionList_default_instance_._instance.get_mutable()->status_ = const_cast< ::milvus::grpc::Status*>( + ::milvus::grpc::Status::internal_default_instance()); +} +class PartitionList::_Internal { + public: + static const ::milvus::grpc::Status& status(const PartitionList* msg); +}; + +const ::milvus::grpc::Status& +PartitionList::_Internal::status(const PartitionList* msg) { + return *msg->status_; +} +void PartitionList::clear_status() { + if (GetArenaNoVirtual() == nullptr && status_ != nullptr) { + delete status_; + } + status_ = nullptr; +} +PartitionList::PartitionList() + : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { + SharedCtor(); + // @@protoc_insertion_point(constructor:milvus.grpc.PartitionList) +} +PartitionList::PartitionList(const PartitionList& from) + : ::PROTOBUF_NAMESPACE_ID::Message(), + _internal_metadata_(nullptr), + partition_array_(from.partition_array_) { + _internal_metadata_.MergeFrom(from._internal_metadata_); + if (from.has_status()) { + status_ = new ::milvus::grpc::Status(*from.status_); + } else { + status_ = nullptr; + } + // @@protoc_insertion_point(copy_constructor:milvus.grpc.PartitionList) +} + +void PartitionList::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_PartitionList_milvus_2eproto.base); + status_ = nullptr; +} + +PartitionList::~PartitionList() { + // @@protoc_insertion_point(destructor:milvus.grpc.PartitionList) + SharedDtor(); +} + +void PartitionList::SharedDtor() { + if (this != internal_default_instance()) delete status_; +} + +void PartitionList::SetCachedSize(int size) const { + _cached_size_.Set(size); +} +const PartitionList& PartitionList::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_PartitionList_milvus_2eproto.base); + return *internal_default_instance(); +} + + +void PartitionList::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + partition_array_.Clear(); + if (GetArenaNoVirtual() == nullptr && status_ != nullptr) { + delete status_; + } + status_ = nullptr; + _internal_metadata_.Clear(); +} + +#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +const char* PartitionList::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + while (!ctx->Done(&ptr)) { + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); + CHK_(ptr); + switch (tag >> 3) { + // .milvus.grpc.Status status = 1; + case 1: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) { + ptr = ctx->ParseMessage(mutable_status(), ptr); + CHK_(ptr); + } else goto handle_unusual; + continue; + // repeated .milvus.grpc.PartitionParam partition_array = 2; + 
case 2: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { + ptr -= 1; + do { + ptr += 1; + ptr = ctx->ParseMessage(add_partition_array(), ptr); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18); + } else goto handle_unusual; + continue; + default: { + handle_unusual: + if ((tag & 7) == 4 || tag == 0) { + ctx->SetLastTag(tag); + goto success; + } + ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx); + CHK_(ptr != nullptr); + continue; + } + } // switch + } // while +success: + return ptr; +failure: + ptr = nullptr; + goto success; +#undef CHK_ +} +#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER +bool PartitionList::MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure + ::PROTOBUF_NAMESPACE_ID::uint32 tag; + // @@protoc_insertion_point(parse_start:milvus.grpc.PartitionList) + for (;;) { + ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // .milvus.grpc.Status status = 1; + case 1: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage( + input, mutable_status())); + } else { + goto handle_unusual; + } + break; + } + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + case 2: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage( + input, add_partition_array())); + } else { + goto handle_unusual; + } + break; + } + + default: { + handle_unusual: + if (tag == 0) { + goto success; + } + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField( + input, tag, _internal_metadata_.mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:milvus.grpc.PartitionList) + return true; +failure: + // @@protoc_insertion_point(parse_failure:milvus.grpc.PartitionList) + return false; +#undef DO_ +} +#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + +void PartitionList::SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .milvus.grpc.Status status = 1; + if (this->has_status()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray( + 1, _Internal::status(this), output); + } + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + for (unsigned int i = 0, + n = static_cast(this->partition_array_size()); i < n; i++) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray( + 2, + this->partition_array(static_cast(i)), + output); + } + + if (_internal_metadata_.have_unknown_fields()) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( + _internal_metadata_.unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:milvus.grpc.PartitionList) +} + +::PROTOBUF_NAMESPACE_ID::uint8* PartitionList::InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const { + // 
@@protoc_insertion_point(serialize_to_array_start:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + // .milvus.grpc.Status status = 1; + if (this->has_status()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessageToArray( + 1, _Internal::status(this), target); + } + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + for (unsigned int i = 0, + n = static_cast(this->partition_array_size()); i < n; i++) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessageToArray( + 2, this->partition_array(static_cast(i)), target); + } + + if (_internal_metadata_.have_unknown_fields()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( + _internal_metadata_.unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.PartitionList) + return target; +} + +size_t PartitionList::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.PartitionList) + size_t total_size = 0; + + if (_internal_metadata_.have_unknown_fields()) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize( + _internal_metadata_.unknown_fields()); + } + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + // Prevent compiler warnings about cached_has_bits being unused + (void) cached_has_bits; + + // repeated .milvus.grpc.PartitionParam partition_array = 2; + { + unsigned int count = static_cast(this->partition_array_size()); + total_size += 1UL * count; + for (unsigned int i = 0; i < count; i++) { + total_size += + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + this->partition_array(static_cast(i))); + } + } + + // .milvus.grpc.Status status = 1; + if (this->has_status()) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *status_); + } + + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); + SetCachedSize(cached_size); + return total_size; +} + +void PartitionList::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.PartitionList) + GOOGLE_DCHECK_NE(&from, this); + const PartitionList* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( + &from); + if (source == nullptr) { + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.PartitionList) + ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); + } else { + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.PartitionList) + MergeFrom(*source); + } +} + +void PartitionList::MergeFrom(const PartitionList& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.PartitionList) + GOOGLE_DCHECK_NE(&from, this); + _internal_metadata_.MergeFrom(from._internal_metadata_); + ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; + (void) cached_has_bits; + + partition_array_.MergeFrom(from.partition_array_); + if (from.has_status()) { + mutable_status()->::milvus::grpc::Status::MergeFrom(from.status()); + } +} + +void PartitionList::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.PartitionList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void PartitionList::CopyFrom(const PartitionList& from) { +// 
@@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.PartitionList) + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool PartitionList::IsInitialized() const { + return true; +} + +void PartitionList::InternalSwap(PartitionList* other) { + using std::swap; + _internal_metadata_.Swap(&other->_internal_metadata_); + CastToBase(&partition_array_)->InternalSwap(CastToBase(&other->partition_array_)); + swap(status_, other->status_); +} + +::PROTOBUF_NAMESPACE_ID::Metadata PartitionList::GetMetadata() const { + return GetMetadataStatic(); +} + + // =================================================================== void Range::InitAsDefaultInstance() { @@ -2369,12 +3461,17 @@ InsertParam::InsertParam(const InsertParam& from) if (!from.table_name().empty()) { table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); } + partition_tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + if (!from.partition_tag().empty()) { + partition_tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_tag_); + } // @@protoc_insertion_point(copy_constructor:milvus.grpc.InsertParam) } void InsertParam::SharedCtor() { ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_InsertParam_milvus_2eproto.base); table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_tag_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } InsertParam::~InsertParam() { @@ -2384,6 +3481,7 @@ InsertParam::~InsertParam() { void InsertParam::SharedDtor() { table_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_tag_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } void InsertParam::SetCachedSize(int size) const { @@ -2404,6 +3502,7 @@ void InsertParam::Clear() { row_record_array_.Clear(); row_id_array_.Clear(); table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); + partition_tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); _internal_metadata_.Clear(); } @@ -2444,6 +3543,13 @@ const char* InsertParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID CHK_(ptr); } else goto handle_unusual; continue; + // string partition_tag = 4; + case 4: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) { + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_partition_tag(), ptr, ctx, "milvus.grpc.InsertParam.partition_tag"); + CHK_(ptr); + } else goto handle_unusual; + continue; default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -2516,6 +3622,21 @@ bool InsertParam::MergePartialFromCodedStream( break; } + // string partition_tag = 4; + case 4: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->mutable_partition_tag())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag().data(), static_cast(this->partition_tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.InsertParam.partition_tag")); + } else { + goto handle_unusual; + } + break; + } + default: { handle_unusual: if (tag == 0) { @@ -2573,6 +3694,16 @@ void InsertParam::SerializeWithCachedSizes( 
this->row_id_array(i), output); } + // string partition_tag = 4; + if (this->partition_tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag().data(), static_cast(this->partition_tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.InsertParam.partition_tag"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( + 4, this->partition_tag(), output); + } + if (_internal_metadata_.have_unknown_fields()) { ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -2618,6 +3749,17 @@ void InsertParam::SerializeWithCachedSizes( WriteInt64NoTagToArray(this->row_id_array_, target); } + // string partition_tag = 4; + if (this->partition_tag().size() > 0) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag().data(), static_cast(this->partition_tag().length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.InsertParam.partition_tag"); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( + 4, this->partition_tag(), target); + } + if (_internal_metadata_.have_unknown_fields()) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -2672,6 +3814,13 @@ size_t InsertParam::ByteSizeLong() const { this->table_name()); } + // string partition_tag = 4; + if (this->partition_tag().size() > 0) { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_tag()); + } + int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); SetCachedSize(cached_size); return total_size; @@ -2705,6 +3854,10 @@ void InsertParam::MergeFrom(const InsertParam& from) { table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); } + if (from.partition_tag().size() > 0) { + + partition_tag_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.partition_tag_); + } } void InsertParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { @@ -2732,6 +3885,8 @@ void InsertParam::InternalSwap(InsertParam* other) { row_id_array_.InternalSwap(&other->row_id_array_); table_name_.Swap(&other->table_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); + partition_tag_.Swap(&other->partition_tag_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArenaNoVirtual()); } ::PROTOBUF_NAMESPACE_ID::Metadata InsertParam::GetMetadata() const { @@ -3097,7 +4252,8 @@ SearchParam::SearchParam(const SearchParam& from) : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr), query_record_array_(from.query_record_array_), - query_range_array_(from.query_range_array_) { + query_range_array_(from.query_range_array_), + partition_tag_array_(from.partition_tag_array_) { _internal_metadata_.MergeFrom(from._internal_metadata_); table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (!from.table_name().empty()) { @@ -3143,6 +4299,7 @@ void SearchParam::Clear() { query_record_array_.Clear(); query_range_array_.Clear(); + partition_tag_array_.Clear(); table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); ::memset(&topk_, 0, static_cast( reinterpret_cast(&nprobe_) - @@ 
-3203,6 +4360,18 @@ const char* SearchParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID CHK_(ptr); } else goto handle_unusual; continue; + // repeated string partition_tag_array = 6; + case 6: + if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) { + ptr -= 1; + do { + ptr += 1; + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_partition_tag_array(), ptr, ctx, "milvus.grpc.SearchParam.partition_tag_array"); + CHK_(ptr); + if (!ctx->DataAvailable(ptr)) break; + } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50); + } else goto handle_unusual; + continue; default: { handle_unusual: if ((tag & 7) == 4 || tag == 0) { @@ -3296,6 +4465,22 @@ bool SearchParam::MergePartialFromCodedStream( break; } + // repeated string partition_tag_array = 6; + case 6: { + if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) { + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString( + input, this->add_partition_tag_array())); + DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag_array(this->partition_tag_array_size() - 1).data(), + static_cast<int>(this->partition_tag_array(this->partition_tag_array_size() - 1).length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, + "milvus.grpc.SearchParam.partition_tag_array")); + } else { + goto handle_unusual; + } + break; + } + default: { handle_unusual: if (tag == 0) { @@ -3361,6 +4546,16 @@ void SearchParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(5, this->nprobe(), output); } + // repeated string partition_tag_array = 6; + for (int i = 0, n = this->partition_tag_array_size(); i < n; i++) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag_array(i).data(), static_cast<int>(this->partition_tag_array(i).length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.SearchParam.partition_tag_array"); + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString( + 6, this->partition_tag_array(i), output); + } + if (_internal_metadata_.have_unknown_fields()) { ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); @@ -3411,6 +4606,16 @@ void SearchParam::SerializeWithCachedSizes( target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(5, this->nprobe(), target); } + // repeated string partition_tag_array = 6; + for (int i = 0, n = this->partition_tag_array_size(); i < n; i++) { + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( + this->partition_tag_array(i).data(), static_cast<int>(this->partition_tag_array(i).length()), + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, + "milvus.grpc.SearchParam.partition_tag_array"); + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + WriteStringToArray(6, this->partition_tag_array(i), target); + } + if (_internal_metadata_.have_unknown_fields()) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); @@ -3454,6 +4659,14 @@ size_t SearchParam::ByteSizeLong() const { } } + // repeated string partition_tag_array = 6; + total_size += 1 * + ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->partition_tag_array_size()); + for (int i = 0, n = this->partition_tag_array_size(); i < n; i++) { + total_size += 
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize( + this->partition_tag_array(i)); + } + // string table_name = 1; if (this->table_name().size() > 0) { total_size += 1 + @@ -3504,6 +4717,7 @@ void SearchParam::MergeFrom(const SearchParam& from) { query_record_array_.MergeFrom(from.query_record_array_); query_range_array_.MergeFrom(from.query_range_array_); + partition_tag_array_.MergeFrom(from.partition_tag_array_); if (from.table_name().size() > 0) { table_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.table_name_); @@ -3539,6 +4753,7 @@ void SearchParam::InternalSwap(SearchParam* other) { _internal_metadata_.Swap(&other->_internal_metadata_); CastToBase(&query_record_array_)->InternalSwap(CastToBase(&other->query_record_array_)); CastToBase(&query_range_array_)->InternalSwap(CastToBase(&other->query_range_array_)); + partition_tag_array_.InternalSwap(CastToBase(&other->partition_tag_array_)); table_name_.Swap(&other->table_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual()); swap(topk_, other->topk_); @@ -6688,25 +7903,25 @@ void IndexParam::InternalSwap(IndexParam* other) { // =================================================================== -void DeleteByRangeParam::InitAsDefaultInstance() { - ::milvus::grpc::_DeleteByRangeParam_default_instance_._instance.get_mutable()->range_ = const_cast< ::milvus::grpc::Range*>( +void DeleteByDateParam::InitAsDefaultInstance() { + ::milvus::grpc::_DeleteByDateParam_default_instance_._instance.get_mutable()->range_ = const_cast< ::milvus::grpc::Range*>( ::milvus::grpc::Range::internal_default_instance()); } -class DeleteByRangeParam::_Internal { +class DeleteByDateParam::_Internal { public: - static const ::milvus::grpc::Range& range(const DeleteByRangeParam* msg); + static const ::milvus::grpc::Range& range(const DeleteByDateParam* msg); }; const ::milvus::grpc::Range& -DeleteByRangeParam::_Internal::range(const DeleteByRangeParam* msg) { +DeleteByDateParam::_Internal::range(const DeleteByDateParam* msg) { return *msg->range_; } -DeleteByRangeParam::DeleteByRangeParam() +DeleteByDateParam::DeleteByDateParam() : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { SharedCtor(); - // @@protoc_insertion_point(constructor:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(constructor:milvus.grpc.DeleteByDateParam) } -DeleteByRangeParam::DeleteByRangeParam(const DeleteByRangeParam& from) +DeleteByDateParam::DeleteByDateParam(const DeleteByDateParam& from) : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) { _internal_metadata_.MergeFrom(from._internal_metadata_); @@ -6719,36 +7934,36 @@ DeleteByRangeParam::DeleteByRangeParam(const DeleteByRangeParam& from) } else { range_ = nullptr; } - // @@protoc_insertion_point(copy_constructor:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(copy_constructor:milvus.grpc.DeleteByDateParam) } -void DeleteByRangeParam::SharedCtor() { - ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_DeleteByRangeParam_milvus_2eproto.base); +void DeleteByDateParam::SharedCtor() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_DeleteByDateParam_milvus_2eproto.base); table_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); range_ = nullptr; } -DeleteByRangeParam::~DeleteByRangeParam() { - // @@protoc_insertion_point(destructor:milvus.grpc.DeleteByRangeParam) +DeleteByDateParam::~DeleteByDateParam() { + // 
@@protoc_insertion_point(destructor:milvus.grpc.DeleteByDateParam) SharedDtor(); } -void DeleteByRangeParam::SharedDtor() { +void DeleteByDateParam::SharedDtor() { table_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (this != internal_default_instance()) delete range_; } -void DeleteByRangeParam::SetCachedSize(int size) const { +void DeleteByDateParam::SetCachedSize(int size) const { _cached_size_.Set(size); } -const DeleteByRangeParam& DeleteByRangeParam::default_instance() { - ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_DeleteByRangeParam_milvus_2eproto.base); +const DeleteByDateParam& DeleteByDateParam::default_instance() { + ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_DeleteByDateParam_milvus_2eproto.base); return *internal_default_instance(); } -void DeleteByRangeParam::Clear() { -// @@protoc_insertion_point(message_clear_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::Clear() { +// @@protoc_insertion_point(message_clear_start:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused (void) cached_has_bits; @@ -6762,7 +7977,7 @@ void DeleteByRangeParam::Clear() { } #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER -const char* DeleteByRangeParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { +const char* DeleteByDateParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { #define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; @@ -6779,7 +7994,7 @@ const char* DeleteByRangeParam::_InternalParse(const char* ptr, ::PROTOBUF_NAMES // string table_name = 2; case 2: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) { - ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_table_name(), ptr, ctx, "milvus.grpc.DeleteByRangeParam.table_name"); + ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_table_name(), ptr, ctx, "milvus.grpc.DeleteByDateParam.table_name"); CHK_(ptr); } else goto handle_unusual; continue; @@ -6803,11 +8018,11 @@ failure: #undef CHK_ } #else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER -bool DeleteByRangeParam::MergePartialFromCodedStream( +bool DeleteByDateParam::MergePartialFromCodedStream( ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) { #define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure ::PROTOBUF_NAMESPACE_ID::uint32 tag; - // @@protoc_insertion_point(parse_start:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(parse_start:milvus.grpc.DeleteByDateParam) for (;;) { ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u); tag = p.first; @@ -6832,7 +8047,7 @@ bool DeleteByRangeParam::MergePartialFromCodedStream( DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( this->table_name().data(), static_cast<int>(this->table_name().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE, - "milvus.grpc.DeleteByRangeParam.table_name")); + "milvus.grpc.DeleteByDateParam.table_name")); } else { goto handle_unusual; } @@ -6851,18 +8066,18 @@ } } success: - // @@protoc_insertion_point(parse_success:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(parse_success:milvus.grpc.DeleteByDateParam) return true; 
failure: - // @@protoc_insertion_point(parse_failure:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(parse_failure:milvus.grpc.DeleteByDateParam) return false; #undef DO_ } #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER -void DeleteByRangeParam::SerializeWithCachedSizes( +void DeleteByDateParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const { - // @@protoc_insertion_point(serialize_start:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_start:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; @@ -6877,7 +8092,7 @@ void DeleteByRangeParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( this->table_name().data(), static_cast<int>(this->table_name().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "milvus.grpc.DeleteByRangeParam.table_name"); + "milvus.grpc.DeleteByDateParam.table_name"); ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased( 2, this->table_name(), output); } @@ -6886,12 +8101,12 @@ ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields( _internal_metadata_.unknown_fields(), output); } - // @@protoc_insertion_point(serialize_end:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_end:milvus.grpc.DeleteByDateParam) } -::PROTOBUF_NAMESPACE_ID::uint8* DeleteByRangeParam::InternalSerializeWithCachedSizesToArray( +::PROTOBUF_NAMESPACE_ID::uint8* DeleteByDateParam::InternalSerializeWithCachedSizesToArray( ::PROTOBUF_NAMESPACE_ID::uint8* target) const { - // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_to_array_start:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; (void) cached_has_bits; @@ -6907,7 +8122,7 @@ void DeleteByRangeParam::SerializeWithCachedSizes( ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String( this->table_name().data(), static_cast<int>(this->table_name().length()), ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE, - "milvus.grpc.DeleteByRangeParam.table_name"); + "milvus.grpc.DeleteByDateParam.table_name"); target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray( 2, this->table_name(), target); @@ -6917,12 +8132,12 @@ target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields(), target); } - // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(serialize_to_array_end:milvus.grpc.DeleteByDateParam) return target; } -size_t DeleteByRangeParam::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.DeleteByRangeParam) +size_t DeleteByDateParam::ByteSizeLong() const { +// @@protoc_insertion_point(message_byte_size_start:milvus.grpc.DeleteByDateParam) size_t total_size = 0; if (_internal_metadata_.have_unknown_fields()) { @@ -6953,23 +8168,23 @@ size_t DeleteByRangeParam::ByteSizeLong() const { return total_size; } -void DeleteByRangeParam::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { -// @@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// 
@@protoc_insertion_point(generalized_merge_from_start:milvus.grpc.DeleteByDateParam) GOOGLE_DCHECK_NE(&from, this); - const DeleteByRangeParam* source = - ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<DeleteByRangeParam>( + const DeleteByDateParam* source = + ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<DeleteByDateParam>( &from); if (source == nullptr) { - // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.grpc.DeleteByDateParam) ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); } else { - // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.grpc.DeleteByDateParam) MergeFrom(*source); } } -void DeleteByRangeParam::MergeFrom(const DeleteByRangeParam& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::MergeFrom(const DeleteByDateParam& from) { +// @@protoc_insertion_point(class_specific_merge_from_start:milvus.grpc.DeleteByDateParam) GOOGLE_DCHECK_NE(&from, this); _internal_metadata_.MergeFrom(from._internal_metadata_); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; @@ -6984,25 +8199,25 @@ void DeleteByRangeParam::MergeFrom(const DeleteByRangeParam& from) { } } -void DeleteByRangeParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { -// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { +// @@protoc_insertion_point(generalized_copy_from_start:milvus.grpc.DeleteByDateParam) if (&from == this) return; Clear(); MergeFrom(from); } -void DeleteByRangeParam::CopyFrom(const DeleteByRangeParam& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.DeleteByRangeParam) +void DeleteByDateParam::CopyFrom(const DeleteByDateParam& from) { +// @@protoc_insertion_point(class_specific_copy_from_start:milvus.grpc.DeleteByDateParam) if (&from == this) return; Clear(); MergeFrom(from); } -bool DeleteByRangeParam::IsInitialized() const { +bool DeleteByDateParam::IsInitialized() const { return true; } -void DeleteByRangeParam::InternalSwap(DeleteByRangeParam* other) { +void DeleteByDateParam::InternalSwap(DeleteByDateParam* other) { using std::swap; _internal_metadata_.Swap(&other->_internal_metadata_); table_name_.Swap(&other->table_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), @@ -7010,7 +8225,7 @@ void DeleteByRangeParam::InternalSwap(DeleteByRangeParam* other) { swap(range_, other->range_); } -::PROTOBUF_NAMESPACE_ID::Metadata DeleteByRangeParam::GetMetadata() const { +::PROTOBUF_NAMESPACE_ID::Metadata DeleteByDateParam::GetMetadata() const { return GetMetadataStatic(); } @@ -7022,12 +8237,21 @@ PROTOBUF_NAMESPACE_OPEN template<> PROTOBUF_NOINLINE ::milvus::grpc::TableName* Arena::CreateMaybeMessage< ::milvus::grpc::TableName >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::TableName >(arena); } +template<> PROTOBUF_NOINLINE ::milvus::grpc::PartitionName* Arena::CreateMaybeMessage< ::milvus::grpc::PartitionName >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::PartitionName >(arena); +} template<> PROTOBUF_NOINLINE ::milvus::grpc::TableNameList* Arena::CreateMaybeMessage< ::milvus::grpc::TableNameList >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::TableNameList >(arena); } template<> 
PROTOBUF_NOINLINE ::milvus::grpc::TableSchema* Arena::CreateMaybeMessage< ::milvus::grpc::TableSchema >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::TableSchema >(arena); } +template<> PROTOBUF_NOINLINE ::milvus::grpc::PartitionParam* Arena::CreateMaybeMessage< ::milvus::grpc::PartitionParam >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::PartitionParam >(arena); +} +template<> PROTOBUF_NOINLINE ::milvus::grpc::PartitionList* Arena::CreateMaybeMessage< ::milvus::grpc::PartitionList >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::PartitionList >(arena); +} template<> PROTOBUF_NOINLINE ::milvus::grpc::Range* Arena::CreateMaybeMessage< ::milvus::grpc::Range >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::Range >(arena); } @@ -7073,8 +8297,8 @@ template<> PROTOBUF_NOINLINE ::milvus::grpc::Index* Arena::CreateMaybeMessage< : template<> PROTOBUF_NOINLINE ::milvus::grpc::IndexParam* Arena::CreateMaybeMessage< ::milvus::grpc::IndexParam >(Arena* arena) { return Arena::CreateInternal< ::milvus::grpc::IndexParam >(arena); } -template<> PROTOBUF_NOINLINE ::milvus::grpc::DeleteByRangeParam* Arena::CreateMaybeMessage< ::milvus::grpc::DeleteByRangeParam >(Arena* arena) { - return Arena::CreateInternal< ::milvus::grpc::DeleteByRangeParam >(arena); +template<> PROTOBUF_NOINLINE ::milvus::grpc::DeleteByDateParam* Arena::CreateMaybeMessage< ::milvus::grpc::DeleteByDateParam >(Arena* arena) { + return Arena::CreateInternal< ::milvus::grpc::DeleteByDateParam >(arena); } PROTOBUF_NAMESPACE_CLOSE diff --git a/core/src/grpc/gen-milvus/milvus.pb.h b/core/src/grpc/gen-milvus/milvus.pb.h index 5ac3fda023..f41ca2c8c4 100644 --- a/core/src/grpc/gen-milvus/milvus.pb.h +++ b/core/src/grpc/gen-milvus/milvus.pb.h @@ -48,7 +48,7 @@ struct TableStruct_milvus_2eproto { PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[] PROTOBUF_SECTION_VARIABLE(protodesc_cold); - static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[19] + static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[22] PROTOBUF_SECTION_VARIABLE(protodesc_cold); static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[]; static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[]; @@ -63,9 +63,9 @@ extern BoolReplyDefaultTypeInternal _BoolReply_default_instance_; class Command; class CommandDefaultTypeInternal; extern CommandDefaultTypeInternal _Command_default_instance_; -class DeleteByRangeParam; -class DeleteByRangeParamDefaultTypeInternal; -extern DeleteByRangeParamDefaultTypeInternal _DeleteByRangeParam_default_instance_; +class DeleteByDateParam; +class DeleteByDateParamDefaultTypeInternal; +extern DeleteByDateParamDefaultTypeInternal _DeleteByDateParam_default_instance_; class Index; class IndexDefaultTypeInternal; extern IndexDefaultTypeInternal _Index_default_instance_; @@ -75,6 +75,15 @@ extern IndexParamDefaultTypeInternal _IndexParam_default_instance_; class InsertParam; class InsertParamDefaultTypeInternal; extern InsertParamDefaultTypeInternal _InsertParam_default_instance_; +class PartitionList; +class PartitionListDefaultTypeInternal; +extern PartitionListDefaultTypeInternal _PartitionList_default_instance_; +class PartitionName; +class PartitionNameDefaultTypeInternal; +extern PartitionNameDefaultTypeInternal _PartitionName_default_instance_; +class PartitionParam; +class PartitionParamDefaultTypeInternal; +extern 
PartitionParamDefaultTypeInternal _PartitionParam_default_instance_; class QueryResult; class QueryResultDefaultTypeInternal; extern QueryResultDefaultTypeInternal _QueryResult_default_instance_; @@ -119,10 +128,13 @@ extern VectorIdsDefaultTypeInternal _VectorIds_default_instance_; PROTOBUF_NAMESPACE_OPEN template<> ::milvus::grpc::BoolReply* Arena::CreateMaybeMessage<::milvus::grpc::BoolReply>(Arena*); template<> ::milvus::grpc::Command* Arena::CreateMaybeMessage<::milvus::grpc::Command>(Arena*); -template<> ::milvus::grpc::DeleteByRangeParam* Arena::CreateMaybeMessage<::milvus::grpc::DeleteByRangeParam>(Arena*); +template<> ::milvus::grpc::DeleteByDateParam* Arena::CreateMaybeMessage<::milvus::grpc::DeleteByDateParam>(Arena*); template<> ::milvus::grpc::Index* Arena::CreateMaybeMessage<::milvus::grpc::Index>(Arena*); template<> ::milvus::grpc::IndexParam* Arena::CreateMaybeMessage<::milvus::grpc::IndexParam>(Arena*); template<> ::milvus::grpc::InsertParam* Arena::CreateMaybeMessage<::milvus::grpc::InsertParam>(Arena*); +template<> ::milvus::grpc::PartitionList* Arena::CreateMaybeMessage<::milvus::grpc::PartitionList>(Arena*); +template<> ::milvus::grpc::PartitionName* Arena::CreateMaybeMessage<::milvus::grpc::PartitionName>(Arena*); +template<> ::milvus::grpc::PartitionParam* Arena::CreateMaybeMessage<::milvus::grpc::PartitionParam>(Arena*); template<> ::milvus::grpc::QueryResult* Arena::CreateMaybeMessage<::milvus::grpc::QueryResult>(Arena*); template<> ::milvus::grpc::Range* Arena::CreateMaybeMessage<::milvus::grpc::Range>(Arena*); template<> ::milvus::grpc::RowRecord* Arena::CreateMaybeMessage<::milvus::grpc::RowRecord>(Arena*); @@ -279,6 +291,143 @@ class TableName : }; // ------------------------------------------------------------------- +class PartitionName : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.PartitionName) */ { + public: + PartitionName(); + virtual ~PartitionName(); + + PartitionName(const PartitionName& from); + PartitionName(PartitionName&& from) noexcept + : PartitionName() { + *this = ::std::move(from); + } + + inline PartitionName& operator=(const PartitionName& from) { + CopyFrom(from); + return *this; + } + inline PartitionName& operator=(PartitionName&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return GetMetadataStatic().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return GetMetadataStatic().reflection; + } + static const PartitionName& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const PartitionName* internal_default_instance() { + return reinterpret_cast<const PartitionName*>( + &_PartitionName_default_instance_); + } + static constexpr int kIndexInFileMessages = + 1; + + friend void swap(PartitionName& a, PartitionName& b) { + a.Swap(&b); + } + inline void Swap(PartitionName* other) { + if (other == this) return; + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PartitionName* New() const final { + return CreateMaybeMessage<PartitionName>(nullptr); + } + + PartitionName* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage<PartitionName>(arena); + } + void CopyFrom(const 
::PROTOBUF_NAMESPACE_ID::Message& from) final; + void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void CopyFrom(const PartitionName& from); + void MergeFrom(const PartitionName& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + #else + bool MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final; + ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + inline void SharedCtor(); + inline void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PartitionName* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "milvus.grpc.PartitionName"; + } + private: + inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + private: + static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() { + ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_milvus_2eproto); + return ::descriptor_table_milvus_2eproto.file_level_metadata[kIndexInFileMessages]; + } + + public: + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kPartitionNameFieldNumber = 1, + }; + // string partition_name = 1; + void clear_partition_name(); + const std::string& partition_name() const; + void set_partition_name(const std::string& value); + void set_partition_name(std::string&& value); + void set_partition_name(const char* value); + void set_partition_name(const char* value, size_t size); + std::string* mutable_partition_name(); + std::string* release_partition_name(); + void set_allocated_partition_name(std::string* partition_name); + + // @@protoc_insertion_point(class_scope:milvus.grpc.PartitionName) + private: + class _Internal; + + ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr partition_name_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_milvus_2eproto; +}; +// ------------------------------------------------------------------- + class TableNameList : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.TableNameList) */ { public: @@ -321,7 +470,7 @@ class TableNameList : &_TableNameList_default_instance_); } static constexpr int kIndexInFileMessages = - 1; + 2; friend void swap(TableNameList& a, TableNameList& b) { a.Swap(&b); @@ -474,7 +623,7 @@ class TableSchema : &_TableSchema_default_instance_); } static constexpr int kIndexInFileMessages = - 2; + 3; friend void swap(TableSchema& a, TableSchema& b) { a.Swap(&b); @@ -600,6 +749,316 @@ class TableSchema : }; // 
------------------------------------------------------------------- +class PartitionParam : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.PartitionParam) */ { + public: + PartitionParam(); + virtual ~PartitionParam(); + + PartitionParam(const PartitionParam& from); + PartitionParam(PartitionParam&& from) noexcept + : PartitionParam() { + *this = ::std::move(from); + } + + inline PartitionParam& operator=(const PartitionParam& from) { + CopyFrom(from); + return *this; + } + inline PartitionParam& operator=(PartitionParam&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return GetMetadataStatic().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return GetMetadataStatic().reflection; + } + static const PartitionParam& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const PartitionParam* internal_default_instance() { + return reinterpret_cast<const PartitionParam*>( + &_PartitionParam_default_instance_); + } + static constexpr int kIndexInFileMessages = + 4; + + friend void swap(PartitionParam& a, PartitionParam& b) { + a.Swap(&b); + } + inline void Swap(PartitionParam* other) { + if (other == this) return; + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PartitionParam* New() const final { + return CreateMaybeMessage<PartitionParam>(nullptr); + } + + PartitionParam* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage<PartitionParam>(arena); + } + void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void CopyFrom(const PartitionParam& from); + void MergeFrom(const PartitionParam& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + #else + bool MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final; + ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + inline void SharedCtor(); + inline void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PartitionParam* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "milvus.grpc.PartitionParam"; + } + private: + inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + private: + static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() { + 
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_milvus_2eproto); + return ::descriptor_table_milvus_2eproto.file_level_metadata[kIndexInFileMessages]; + } + + public: + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kTableNameFieldNumber = 1, + kPartitionNameFieldNumber = 2, + kTagFieldNumber = 3, + }; + // string table_name = 1; + void clear_table_name(); + const std::string& table_name() const; + void set_table_name(const std::string& value); + void set_table_name(std::string&& value); + void set_table_name(const char* value); + void set_table_name(const char* value, size_t size); + std::string* mutable_table_name(); + std::string* release_table_name(); + void set_allocated_table_name(std::string* table_name); + + // string partition_name = 2; + void clear_partition_name(); + const std::string& partition_name() const; + void set_partition_name(const std::string& value); + void set_partition_name(std::string&& value); + void set_partition_name(const char* value); + void set_partition_name(const char* value, size_t size); + std::string* mutable_partition_name(); + std::string* release_partition_name(); + void set_allocated_partition_name(std::string* partition_name); + + // string tag = 3; + void clear_tag(); + const std::string& tag() const; + void set_tag(const std::string& value); + void set_tag(std::string&& value); + void set_tag(const char* value); + void set_tag(const char* value, size_t size); + std::string* mutable_tag(); + std::string* release_tag(); + void set_allocated_tag(std::string* tag); + + // @@protoc_insertion_point(class_scope:milvus.grpc.PartitionParam) + private: + class _Internal; + + ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr table_name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr partition_name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr tag_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_milvus_2eproto; +}; +// ------------------------------------------------------------------- + +class PartitionList : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.PartitionList) */ { + public: + PartitionList(); + virtual ~PartitionList(); + + PartitionList(const PartitionList& from); + PartitionList(PartitionList&& from) noexcept + : PartitionList() { + *this = ::std::move(from); + } + + inline PartitionList& operator=(const PartitionList& from) { + CopyFrom(from); + return *this; + } + inline PartitionList& operator=(PartitionList&& from) noexcept { + if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { + if (this != &from) InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return GetMetadataStatic().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return GetMetadataStatic().reflection; + } + static const PartitionList& default_instance(); + + static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY + static inline const PartitionList* internal_default_instance() { + return reinterpret_cast<const PartitionList*>( + &_PartitionList_default_instance_); + } + static constexpr int kIndexInFileMessages = + 5; + 
friend void swap(PartitionList& a, PartitionList& b) { + a.Swap(&b); + } + inline void Swap(PartitionList* other) { + if (other == this) return; + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + inline PartitionList* New() const final { + return CreateMaybeMessage<PartitionList>(nullptr); + } + + PartitionList* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage<PartitionList>(arena); + } + void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; + void CopyFrom(const PartitionList& from); + void MergeFrom(const PartitionList& from); + PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; + bool IsInitialized() const final; + + size_t ByteSizeLong() const final; + #if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final; + #else + bool MergePartialFromCodedStream( + ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final; + #endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER + void SerializeWithCachedSizes( + ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final; + ::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray( + ::PROTOBUF_NAMESPACE_ID::uint8* target) const final; + int GetCachedSize() const final { return _cached_size_.Get(); } + + private: + inline void SharedCtor(); + inline void SharedDtor(); + void SetCachedSize(int size) const final; + void InternalSwap(PartitionList* other); + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "milvus.grpc.PartitionList"; + } + private: + inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { + return nullptr; + } + inline void* MaybeArenaPtr() const { + return nullptr; + } + public: + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + private: + static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() { + ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_milvus_2eproto); + return ::descriptor_table_milvus_2eproto.file_level_metadata[kIndexInFileMessages]; + } + + public: + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + enum : int { + kPartitionArrayFieldNumber = 2, + kStatusFieldNumber = 1, + }; + // repeated .milvus.grpc.PartitionParam partition_array = 2; + int partition_array_size() const; + void clear_partition_array(); + ::milvus::grpc::PartitionParam* mutable_partition_array(int index); + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >* + mutable_partition_array(); + const ::milvus::grpc::PartitionParam& partition_array(int index) const; + ::milvus::grpc::PartitionParam* add_partition_array(); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >& + partition_array() const; + + // .milvus.grpc.Status status = 1; + bool has_status() const; + void clear_status(); + const ::milvus::grpc::Status& status() const; + ::milvus::grpc::Status* release_status(); + ::milvus::grpc::Status* mutable_status(); + void set_allocated_status(::milvus::grpc::Status* status); + + // @@protoc_insertion_point(class_scope:milvus.grpc.PartitionList) + private: + class _Internal; + + ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; + 
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam > partition_array_; + ::milvus::grpc::Status* status_; + mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; + friend struct ::TableStruct_milvus_2eproto; +}; +// ------------------------------------------------------------------- + class Range : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.Range) */ { public: @@ -642,7 +1101,7 @@ class Range : &_Range_default_instance_); } static constexpr int kIndexInFileMessages = - 3; + 6; friend void swap(Range& a, Range& b) { a.Swap(&b); @@ -792,7 +1251,7 @@ class RowRecord : &_RowRecord_default_instance_); } static constexpr int kIndexInFileMessages = - 4; + 7; friend void swap(RowRecord& a, RowRecord& b) { a.Swap(&b); @@ -930,7 +1389,7 @@ class InsertParam : &_InsertParam_default_instance_); } static constexpr int kIndexInFileMessages = - 5; + 8; friend void swap(InsertParam& a, InsertParam& b) { a.Swap(&b); @@ -1004,6 +1463,7 @@ class InsertParam : kRowRecordArrayFieldNumber = 2, kRowIdArrayFieldNumber = 3, kTableNameFieldNumber = 1, + kPartitionTagFieldNumber = 4, }; // repeated .milvus.grpc.RowRecord row_record_array = 2; int row_record_array_size() const; @@ -1038,6 +1498,17 @@ class InsertParam : std::string* release_table_name(); void set_allocated_table_name(std::string* table_name); + // string partition_tag = 4; + void clear_partition_tag(); + const std::string& partition_tag() const; + void set_partition_tag(const std::string& value); + void set_partition_tag(std::string&& value); + void set_partition_tag(const char* value); + void set_partition_tag(const char* value, size_t size); + std::string* mutable_partition_tag(); + std::string* release_partition_tag(); + void set_allocated_partition_tag(std::string* partition_tag); + // @@protoc_insertion_point(class_scope:milvus.grpc.InsertParam) private: class _Internal; @@ -1047,6 +1518,7 @@ class InsertParam : ::PROTOBUF_NAMESPACE_ID::RepeatedField< ::PROTOBUF_NAMESPACE_ID::int64 > row_id_array_; mutable std::atomic<int> _row_id_array_cached_byte_size_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr table_name_; + ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr partition_tag_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; friend struct ::TableStruct_milvus_2eproto; }; @@ -1094,7 +1566,7 @@ class VectorIds : &_VectorIds_default_instance_); } static constexpr int kIndexInFileMessages = - 6; + 9; friend void swap(VectorIds& a, VectorIds& b) { a.Swap(&b); @@ -1242,7 +1714,7 @@ class SearchParam : &_SearchParam_default_instance_); } static constexpr int kIndexInFileMessages = - 7; + 10; friend void swap(SearchParam& a, SearchParam& b) { a.Swap(&b); @@ -1315,6 +1787,7 @@ class SearchParam : enum : int { kQueryRecordArrayFieldNumber = 2, kQueryRangeArrayFieldNumber = 3, + kPartitionTagArrayFieldNumber = 6, kTableNameFieldNumber = 1, kTopkFieldNumber = 4, kNprobeFieldNumber = 5, }; @@ -1341,6 +1814,23 @@ class SearchParam : const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::Range >& query_range_array() const; + // repeated string partition_tag_array = 6; + int partition_tag_array_size() const; + void clear_partition_tag_array(); + const std::string& partition_tag_array(int index) const; + std::string* mutable_partition_tag_array(int index); + void set_partition_tag_array(int index, const std::string& value); + void set_partition_tag_array(int index, std::string&& value); + void set_partition_tag_array(int index, const char* 
value); + void set_partition_tag_array(int index, const char* value, size_t size); + std::string* add_partition_tag_array(); + void add_partition_tag_array(const std::string& value); + void add_partition_tag_array(std::string&& value); + void add_partition_tag_array(const char* value); + void add_partition_tag_array(const char* value, size_t size); + const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& partition_tag_array() const; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_partition_tag_array(); + // string table_name = 1; void clear_table_name(); const std::string& table_name() const; @@ -1369,6 +1859,7 @@ class SearchParam : ::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::RowRecord > query_record_array_; ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::Range > query_range_array_; + ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> partition_tag_array_; ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr table_name_; ::PROTOBUF_NAMESPACE_ID::int64 topk_; ::PROTOBUF_NAMESPACE_ID::int64 nprobe_; @@ -1419,7 +1910,7 @@ class SearchInFilesParam : &_SearchInFilesParam_default_instance_); } static constexpr int kIndexInFileMessages = - 8; + 11; friend void swap(SearchInFilesParam& a, SearchInFilesParam& b) { a.Swap(&b); @@ -1572,7 +2063,7 @@ class QueryResult : &_QueryResult_default_instance_); } static constexpr int kIndexInFileMessages = - 9; + 12; friend void swap(QueryResult& a, QueryResult& b) { a.Swap(&b); @@ -1710,7 +2201,7 @@ class TopKQueryResult : &_TopKQueryResult_default_instance_); } static constexpr int kIndexInFileMessages = - 10; + 13; friend void swap(TopKQueryResult& a, TopKQueryResult& b) { a.Swap(&b); @@ -1847,7 +2338,7 @@ class TopKQueryResultList : &_TopKQueryResultList_default_instance_); } static constexpr int kIndexInFileMessages = - 11; + 14; friend void swap(TopKQueryResultList& a, TopKQueryResultList& b) { a.Swap(&b); @@ -1994,7 +2485,7 @@ class StringReply : &_StringReply_default_instance_); } static constexpr int kIndexInFileMessages = - 12; + 15; friend void swap(StringReply& a, StringReply& b) { a.Swap(&b); @@ -2141,7 +2632,7 @@ class BoolReply : &_BoolReply_default_instance_); } static constexpr int kIndexInFileMessages = - 13; + 16; friend void swap(BoolReply& a, BoolReply& b) { a.Swap(&b); @@ -2282,7 +2773,7 @@ class TableRowCount : &_TableRowCount_default_instance_); } static constexpr int kIndexInFileMessages = - 14; + 17; friend void swap(TableRowCount& a, TableRowCount& b) { a.Swap(&b); @@ -2423,7 +2914,7 @@ class Command : &_Command_default_instance_); } static constexpr int kIndexInFileMessages = - 15; + 18; friend void swap(Command& a, Command& b) { a.Swap(&b); @@ -2560,7 +3051,7 @@ class Index : &_Index_default_instance_); } static constexpr int kIndexInFileMessages = - 16; + 19; friend void swap(Index& a, Index& b) { a.Swap(&b); @@ -2698,7 +3189,7 @@ class IndexParam : &_IndexParam_default_instance_); } static constexpr int kIndexInFileMessages = - 17; + 20; friend void swap(IndexParam& a, IndexParam& b) { a.Swap(&b); @@ -2813,23 +3304,23 @@ class IndexParam : }; // ------------------------------------------------------------------- -class DeleteByRangeParam : - public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.DeleteByRangeParam) */ { +class DeleteByDateParam : + public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.grpc.DeleteByDateParam) */ { public: - 
DeleteByRangeParam(); - virtual ~DeleteByRangeParam(); + DeleteByDateParam(); + virtual ~DeleteByDateParam(); - DeleteByRangeParam(const DeleteByRangeParam& from); - DeleteByRangeParam(DeleteByRangeParam&& from) noexcept - : DeleteByRangeParam() { + DeleteByDateParam(const DeleteByDateParam& from); + DeleteByDateParam(DeleteByDateParam&& from) noexcept + : DeleteByDateParam() { *this = ::std::move(from); } - inline DeleteByRangeParam& operator=(const DeleteByRangeParam& from) { + inline DeleteByDateParam& operator=(const DeleteByDateParam& from) { CopyFrom(from); return *this; } - inline DeleteByRangeParam& operator=(DeleteByRangeParam&& from) noexcept { + inline DeleteByDateParam& operator=(DeleteByDateParam&& from) noexcept { if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) { if (this != &from) InternalSwap(&from); } else { @@ -2847,37 +3338,37 @@ class DeleteByRangeParam : static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { return GetMetadataStatic().reflection; } - static const DeleteByRangeParam& default_instance(); + static const DeleteByDateParam& default_instance(); static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY - static inline const DeleteByRangeParam* internal_default_instance() { - return reinterpret_cast<const DeleteByRangeParam*>( - &_DeleteByRangeParam_default_instance_); + static inline const DeleteByDateParam* internal_default_instance() { + return reinterpret_cast<const DeleteByDateParam*>( + &_DeleteByDateParam_default_instance_); } static constexpr int kIndexInFileMessages = - 18; + 21; - friend void swap(DeleteByRangeParam& a, DeleteByRangeParam& b) { + friend void swap(DeleteByDateParam& a, DeleteByDateParam& b) { a.Swap(&b); } - inline void Swap(DeleteByRangeParam* other) { + inline void Swap(DeleteByDateParam* other) { if (other == this) return; InternalSwap(other); } // implements Message ---------------------------------------------- - inline DeleteByRangeParam* New() const final { - return CreateMaybeMessage<DeleteByRangeParam>(nullptr); + inline DeleteByDateParam* New() const final { + return CreateMaybeMessage<DeleteByDateParam>(nullptr); } - DeleteByRangeParam* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { - return CreateMaybeMessage<DeleteByRangeParam>(arena); + DeleteByDateParam* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final { + return CreateMaybeMessage<DeleteByDateParam>(arena); } void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final; - void CopyFrom(const DeleteByRangeParam& from); - void MergeFrom(const DeleteByRangeParam& from); + void CopyFrom(const DeleteByDateParam& from); + void MergeFrom(const DeleteByDateParam& from); PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final; bool IsInitialized() const final; @@ -2898,10 +3389,10 @@ class DeleteByRangeParam : inline void SharedCtor(); inline void SharedDtor(); void SetCachedSize(int size) const final; - void InternalSwap(DeleteByRangeParam* other); + void InternalSwap(DeleteByDateParam* other); friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { - return "milvus.grpc.DeleteByRangeParam"; + return "milvus.grpc.DeleteByDateParam"; } private: inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const { @@ -2948,7 +3439,7 @@ class DeleteByRangeParam : ::milvus::grpc::Range* mutable_range(); void set_allocated_range(::milvus::grpc::Range* range); - // @@protoc_insertion_point(class_scope:milvus.grpc.DeleteByRangeParam) + // @@protoc_insertion_point(class_scope:milvus.grpc.DeleteByDateParam) private: class _Internal; @@ -3022,6 
+3513,61 @@ inline void TableName::set_allocated_table_name(std::string* table_name) { // ------------------------------------------------------------------- +// PartitionName + +// string partition_name = 1; +inline void PartitionName::clear_partition_name() { + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionName::partition_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionName.partition_name) + return partition_name_.GetNoArena(); +} +inline void PartitionName::set_partition_name(const std::string& value) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.PartitionName.partition_name) +} +inline void PartitionName::set_partition_name(std::string&& value) { + + partition_name_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionName.partition_name) +} +inline void PartitionName::set_partition_name(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionName.partition_name) +} +inline void PartitionName::set_partition_name(const char* value, size_t size) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast<const char*>(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionName.partition_name) +} +inline std::string* PartitionName::mutable_partition_name() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionName.partition_name) + return partition_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionName::release_partition_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionName.partition_name) + + return partition_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionName::set_allocated_partition_name(std::string* partition_name) { + if (partition_name != nullptr) { + + } else { + + } + partition_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), partition_name); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionName.partition_name) +} + +// ------------------------------------------------------------------- + // TableNameList // .milvus.grpc.Status status = 1; @@ -3278,6 +3824,242 @@ inline void TableSchema::set_metric_type(::PROTOBUF_NAMESPACE_ID::int32 value) { // ------------------------------------------------------------------- +// PartitionParam + +// string table_name = 1; +inline void PartitionParam::clear_table_name() { + table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionParam::table_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionParam.table_name) + return table_name_.GetNoArena(); +} +inline void PartitionParam::set_table_name(const std::string& value) { + + table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // 
@@protoc_insertion_point(field_set:milvus.grpc.PartitionParam.table_name) +} +inline void PartitionParam::set_table_name(std::string&& value) { + + table_name_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionParam.table_name) +} +inline void PartitionParam::set_table_name(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionParam.table_name) +} +inline void PartitionParam::set_table_name(const char* value, size_t size) { + + table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast<const char*>(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionParam.table_name) +} +inline std::string* PartitionParam::mutable_table_name() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionParam.table_name) + return table_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionParam::release_table_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionParam.table_name) + + return table_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionParam::set_allocated_table_name(std::string* table_name) { + if (table_name != nullptr) { + + } else { + + } + table_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), table_name); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionParam.table_name) +} + +// string partition_name = 2; +inline void PartitionParam::clear_partition_name() { + partition_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionParam::partition_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionParam.partition_name) + return partition_name_.GetNoArena(); +} +inline void PartitionParam::set_partition_name(const std::string& value) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.PartitionParam.partition_name) +} +inline void PartitionParam::set_partition_name(std::string&& value) { + + partition_name_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionParam.partition_name) +} +inline void PartitionParam::set_partition_name(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionParam.partition_name) +} +inline void PartitionParam::set_partition_name(const char* value, size_t size) { + + partition_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast<const char*>(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionParam.partition_name) +} +inline std::string* PartitionParam::mutable_partition_name() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionParam.partition_name) + return 
partition_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionParam::release_partition_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionParam.partition_name) + + return partition_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionParam::set_allocated_partition_name(std::string* partition_name) { + if (partition_name != nullptr) { + + } else { + + } + partition_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), partition_name); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionParam.partition_name) +} + +// string tag = 3; +inline void PartitionParam::clear_tag() { + tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& PartitionParam::tag() const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionParam.tag) + return tag_.GetNoArena(); +} +inline void PartitionParam::set_tag(const std::string& value) { + + tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.PartitionParam.tag) +} +inline void PartitionParam::set_tag(std::string&& value) { + + tag_.SetNoArena( + &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.PartitionParam.tag) +} +inline void PartitionParam::set_tag(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.PartitionParam.tag) +} +inline void PartitionParam::set_tag(const char* value, size_t size) { + + tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast<const char*>(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.PartitionParam.tag) +} +inline std::string* PartitionParam::mutable_tag() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionParam.tag) + return tag_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* PartitionParam::release_tag() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionParam.tag) + + return tag_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void PartitionParam::set_allocated_tag(std::string* tag) { + if (tag != nullptr) { + + } else { + + } + tag_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), tag); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionParam.tag) +} + +// ------------------------------------------------------------------- + +// PartitionList + +// .milvus.grpc.Status status = 1; +inline bool PartitionList::has_status() const { + return this != internal_default_instance() && status_ != nullptr; +} +inline const ::milvus::grpc::Status& PartitionList::status() const { + const ::milvus::grpc::Status* p = status_; + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionList.status) + return p != nullptr ? 
*p : *reinterpret_cast( + &::milvus::grpc::_Status_default_instance_); +} +inline ::milvus::grpc::Status* PartitionList::release_status() { + // @@protoc_insertion_point(field_release:milvus.grpc.PartitionList.status) + + ::milvus::grpc::Status* temp = status_; + status_ = nullptr; + return temp; +} +inline ::milvus::grpc::Status* PartitionList::mutable_status() { + + if (status_ == nullptr) { + auto* p = CreateMaybeMessage<::milvus::grpc::Status>(GetArenaNoVirtual()); + status_ = p; + } + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionList.status) + return status_; +} +inline void PartitionList::set_allocated_status(::milvus::grpc::Status* status) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual(); + if (message_arena == nullptr) { + delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(status_); + } + if (status) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = nullptr; + if (message_arena != submessage_arena) { + status = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, status, submessage_arena); + } + + } else { + + } + status_ = status; + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.PartitionList.status) +} + +// repeated .milvus.grpc.PartitionParam partition_array = 2; +inline int PartitionList::partition_array_size() const { + return partition_array_.size(); +} +inline void PartitionList::clear_partition_array() { + partition_array_.Clear(); +} +inline ::milvus::grpc::PartitionParam* PartitionList::mutable_partition_array(int index) { + // @@protoc_insertion_point(field_mutable:milvus.grpc.PartitionList.partition_array) + return partition_array_.Mutable(index); +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >* +PartitionList::mutable_partition_array() { + // @@protoc_insertion_point(field_mutable_list:milvus.grpc.PartitionList.partition_array) + return &partition_array_; +} +inline const ::milvus::grpc::PartitionParam& PartitionList::partition_array(int index) const { + // @@protoc_insertion_point(field_get:milvus.grpc.PartitionList.partition_array) + return partition_array_.Get(index); +} +inline ::milvus::grpc::PartitionParam* PartitionList::add_partition_array() { + // @@protoc_insertion_point(field_add:milvus.grpc.PartitionList.partition_array) + return partition_array_.Add(); +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::grpc::PartitionParam >& +PartitionList::partition_array() const { + // @@protoc_insertion_point(field_list:milvus.grpc.PartitionList.partition_array) + return partition_array_; +} + +// ------------------------------------------------------------------- + // Range // string start_value = 1; @@ -3531,6 +4313,57 @@ InsertParam::mutable_row_id_array() { return &row_id_array_; } +// string partition_tag = 4; +inline void InsertParam::clear_partition_tag() { + partition_tag_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline const std::string& InsertParam::partition_tag() const { + // @@protoc_insertion_point(field_get:milvus.grpc.InsertParam.partition_tag) + return partition_tag_.GetNoArena(); +} +inline void InsertParam::set_partition_tag(const std::string& value) { + + partition_tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); + // @@protoc_insertion_point(field_set:milvus.grpc.InsertParam.partition_tag) +} +inline void InsertParam::set_partition_tag(std::string&& value) { + + partition_tag_.SetNoArena( + 
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.InsertParam.partition_tag) +} +inline void InsertParam::set_partition_tag(const char* value) { + GOOGLE_DCHECK(value != nullptr); + + partition_tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); + // @@protoc_insertion_point(field_set_char:milvus.grpc.InsertParam.partition_tag) +} +inline void InsertParam::set_partition_tag(const char* value, size_t size) { + + partition_tag_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + ::std::string(reinterpret_cast(value), size)); + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.InsertParam.partition_tag) +} +inline std::string* InsertParam::mutable_partition_tag() { + + // @@protoc_insertion_point(field_mutable:milvus.grpc.InsertParam.partition_tag) + return partition_tag_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline std::string* InsertParam::release_partition_tag() { + // @@protoc_insertion_point(field_release:milvus.grpc.InsertParam.partition_tag) + + return partition_tag_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); +} +inline void InsertParam::set_allocated_partition_tag(std::string* partition_tag) { + if (partition_tag != nullptr) { + + } else { + + } + partition_tag_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), partition_tag); + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.InsertParam.partition_tag) +} + // ------------------------------------------------------------------- // VectorIds @@ -3753,6 +4586,71 @@ inline void SearchParam::set_nprobe(::PROTOBUF_NAMESPACE_ID::int64 value) { // @@protoc_insertion_point(field_set:milvus.grpc.SearchParam.nprobe) } +// repeated string partition_tag_array = 6; +inline int SearchParam::partition_tag_array_size() const { + return partition_tag_array_.size(); +} +inline void SearchParam::clear_partition_tag_array() { + partition_tag_array_.Clear(); +} +inline const std::string& SearchParam::partition_tag_array(int index) const { + // @@protoc_insertion_point(field_get:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_.Get(index); +} +inline std::string* SearchParam::mutable_partition_tag_array(int index) { + // @@protoc_insertion_point(field_mutable:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_.Mutable(index); +} +inline void SearchParam::set_partition_tag_array(int index, const std::string& value) { + // @@protoc_insertion_point(field_set:milvus.grpc.SearchParam.partition_tag_array) + partition_tag_array_.Mutable(index)->assign(value); +} +inline void SearchParam::set_partition_tag_array(int index, std::string&& value) { + // @@protoc_insertion_point(field_set:milvus.grpc.SearchParam.partition_tag_array) + partition_tag_array_.Mutable(index)->assign(std::move(value)); +} +inline void SearchParam::set_partition_tag_array(int index, const char* value) { + GOOGLE_DCHECK(value != nullptr); + partition_tag_array_.Mutable(index)->assign(value); + // @@protoc_insertion_point(field_set_char:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::set_partition_tag_array(int index, const char* value, size_t size) { + partition_tag_array_.Mutable(index)->assign( + reinterpret_cast(value), size); + // 
@@protoc_insertion_point(field_set_pointer:milvus.grpc.SearchParam.partition_tag_array) +} +inline std::string* SearchParam::add_partition_tag_array() { + // @@protoc_insertion_point(field_add_mutable:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_.Add(); +} +inline void SearchParam::add_partition_tag_array(const std::string& value) { + partition_tag_array_.Add()->assign(value); + // @@protoc_insertion_point(field_add:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::add_partition_tag_array(std::string&& value) { + partition_tag_array_.Add(std::move(value)); + // @@protoc_insertion_point(field_add:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::add_partition_tag_array(const char* value) { + GOOGLE_DCHECK(value != nullptr); + partition_tag_array_.Add()->assign(value); + // @@protoc_insertion_point(field_add_char:milvus.grpc.SearchParam.partition_tag_array) +} +inline void SearchParam::add_partition_tag_array(const char* value, size_t size) { + partition_tag_array_.Add()->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_add_pointer:milvus.grpc.SearchParam.partition_tag_array) +} +inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField& +SearchParam::partition_tag_array() const { + // @@protoc_insertion_point(field_list:milvus.grpc.SearchParam.partition_tag_array) + return partition_tag_array_; +} +inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField* +SearchParam::mutable_partition_tag_array() { + // @@protoc_insertion_point(field_mutable_list:milvus.grpc.SearchParam.partition_tag_array) + return &partition_tag_array_; +} + // ------------------------------------------------------------------- // SearchInFilesParam @@ -4484,41 +5382,41 @@ inline void IndexParam::set_allocated_index(::milvus::grpc::Index* index) { // ------------------------------------------------------------------- -// DeleteByRangeParam +// DeleteByDateParam // .milvus.grpc.Range range = 1; -inline bool DeleteByRangeParam::has_range() const { +inline bool DeleteByDateParam::has_range() const { return this != internal_default_instance() && range_ != nullptr; } -inline void DeleteByRangeParam::clear_range() { +inline void DeleteByDateParam::clear_range() { if (GetArenaNoVirtual() == nullptr && range_ != nullptr) { delete range_; } range_ = nullptr; } -inline const ::milvus::grpc::Range& DeleteByRangeParam::range() const { +inline const ::milvus::grpc::Range& DeleteByDateParam::range() const { const ::milvus::grpc::Range* p = range_; - // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByRangeParam.range) + // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByDateParam.range) return p != nullptr ? 
*p : *reinterpret_cast( &::milvus::grpc::_Range_default_instance_); } -inline ::milvus::grpc::Range* DeleteByRangeParam::release_range() { - // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByRangeParam.range) +inline ::milvus::grpc::Range* DeleteByDateParam::release_range() { + // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByDateParam.range) ::milvus::grpc::Range* temp = range_; range_ = nullptr; return temp; } -inline ::milvus::grpc::Range* DeleteByRangeParam::mutable_range() { +inline ::milvus::grpc::Range* DeleteByDateParam::mutable_range() { if (range_ == nullptr) { auto* p = CreateMaybeMessage<::milvus::grpc::Range>(GetArenaNoVirtual()); range_ = p; } - // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByRangeParam.range) + // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByDateParam.range) return range_; } -inline void DeleteByRangeParam::set_allocated_range(::milvus::grpc::Range* range) { +inline void DeleteByDateParam::set_allocated_range(::milvus::grpc::Range* range) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual(); if (message_arena == nullptr) { delete range_; @@ -4534,58 +5432,58 @@ inline void DeleteByRangeParam::set_allocated_range(::milvus::grpc::Range* range } range_ = range; - // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByRangeParam.range) + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByDateParam.range) } // string table_name = 2; -inline void DeleteByRangeParam::clear_table_name() { +inline void DeleteByDateParam::clear_table_name() { table_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } -inline const std::string& DeleteByRangeParam::table_name() const { - // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByRangeParam.table_name) +inline const std::string& DeleteByDateParam::table_name() const { + // @@protoc_insertion_point(field_get:milvus.grpc.DeleteByDateParam.table_name) return table_name_.GetNoArena(); } -inline void DeleteByRangeParam::set_table_name(const std::string& value) { +inline void DeleteByDateParam::set_table_name(const std::string& value) { table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value); - // @@protoc_insertion_point(field_set:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set:milvus.grpc.DeleteByDateParam.table_name) } -inline void DeleteByRangeParam::set_table_name(std::string&& value) { +inline void DeleteByDateParam::set_table_name(std::string&& value) { table_name_.SetNoArena( &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value)); - // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_rvalue:milvus.grpc.DeleteByDateParam.table_name) } -inline void DeleteByRangeParam::set_table_name(const char* value) { +inline void DeleteByDateParam::set_table_name(const char* value) { GOOGLE_DCHECK(value != nullptr); table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); - // @@protoc_insertion_point(field_set_char:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_char:milvus.grpc.DeleteByDateParam.table_name) } -inline void DeleteByRangeParam::set_table_name(const char* value, size_t size) { +inline void DeleteByDateParam::set_table_name(const char* value, size_t size) { 
table_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(reinterpret_cast(value), size)); - // @@protoc_insertion_point(field_set_pointer:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_pointer:milvus.grpc.DeleteByDateParam.table_name) } -inline std::string* DeleteByRangeParam::mutable_table_name() { +inline std::string* DeleteByDateParam::mutable_table_name() { - // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_mutable:milvus.grpc.DeleteByDateParam.table_name) return table_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } -inline std::string* DeleteByRangeParam::release_table_name() { - // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByRangeParam.table_name) +inline std::string* DeleteByDateParam::release_table_name() { + // @@protoc_insertion_point(field_release:milvus.grpc.DeleteByDateParam.table_name) return table_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); } -inline void DeleteByRangeParam::set_allocated_table_name(std::string* table_name) { +inline void DeleteByDateParam::set_allocated_table_name(std::string* table_name) { if (table_name != nullptr) { } else { } table_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), table_name); - // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByRangeParam.table_name) + // @@protoc_insertion_point(field_set_allocated:milvus.grpc.DeleteByDateParam.table_name) } #ifdef __GNUC__ @@ -4627,6 +5525,12 @@ inline void DeleteByRangeParam::set_allocated_table_name(std::string* table_name // ------------------------------------------------------------------- +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + +// ------------------------------------------------------------------- + // @@protoc_insertion_point(namespace_scope) diff --git a/core/src/grpc/milvus.proto b/core/src/grpc/milvus.proto index 2856dfc6d9..c2dbbc43ab 100644 --- a/core/src/grpc/milvus.proto +++ b/core/src/grpc/milvus.proto @@ -5,14 +5,21 @@ import "status.proto"; package milvus.grpc; /** - * @brief Table Name + * @brief Table name */ message TableName { string table_name = 1; } /** - * @brief Table Name List + * @brief Partition name + */ +message PartitionName { + string partition_name = 1; +} + +/** + * @brief Table name list */ message TableNameList { Status status = 1; @@ -20,7 +27,7 @@ message TableNameList { } /** - * @brief Table Schema + * @brief Table schema */ message TableSchema { Status status = 1; @@ -31,7 +38,24 @@ message TableSchema { } /** - * @brief Range Schema + * @brief Params of partition + */ +message PartitionParam { + string table_name = 1; + string partition_name = 2; + string tag = 3; +} + +/** + * @brief Partition list + */ +message PartitionList { + Status status = 1; + repeated PartitionParam partition_array = 2; +} + +/** + * @brief Range schema */ message Range { string start_value = 1; @@ -46,12 +70,13 @@ message RowRecord { } /** - * @brief params to be inserted + * @brief Params to be inserted */ message InsertParam { string table_name = 1; repeated RowRecord row_record_array = 2; repeated int64 row_id_array = 3; //optional + string partition_tag = 4; } /** @@ -63,7 +88,7 @@ message VectorIds { } /** - * @brief params for searching vector + * @brief 
Params for searching vector */ message SearchParam { string table_name = 1; @@ -71,10 +96,11 @@ message SearchParam { repeated Range query_range_array = 3; int64 topk = 4; int64 nprobe = 5; + repeated string partition_tag_array = 6; } /** - * @brief params for searching vector in files + * @brief Params for searching vector in files */ message SearchInFilesParam { repeated string file_id_array = 1; @@ -105,7 +131,7 @@ message TopKQueryResultList { } /** - * @brief Server String Reply + * @brief Server string Reply */ message StringReply { Status status = 1; @@ -129,7 +155,7 @@ message TableRowCount { } /** - * @brief Give Server Command + * @brief Give server Command */ message Command { string cmd = 1; @@ -155,169 +181,173 @@ message IndexParam { } /** - * @brief table name and range for DeleteByRange + * @brief table name and range for DeleteByDate */ -message DeleteByRangeParam { +message DeleteByDateParam { Range range = 1; string table_name = 2; } service MilvusService { /** - * @brief Create table method + * @brief This method is used to create table * - * This method is used to create table - * - * @param param, use to provide table information to be created. + * @param TableSchema, use to provide table information to be created. * + * @return Status */ rpc CreateTable(TableSchema) returns (Status){} /** - * @brief Test table existence method + * @brief This method is used to test table existence. * - * This method is used to test table existence. - * - * @param table_name, table name is going to be tested. + * @param TableName, table name is going to be tested. * + * @return BoolReply */ rpc HasTable(TableName) returns (BoolReply) {} /** - * @brief Delete table method + * @brief This method is used to get table schema. * - * This method is used to delete table. + * @param TableName, target table name. * - * @param table_name, table name is going to be deleted. - * - */ - rpc DropTable(TableName) returns (Status) {} - - /** - * @brief Build index by table method - * - * This method is used to build index by table in sync mode. - * - * @param table_name, table is going to be built index. - * - */ - rpc CreateIndex(IndexParam) returns (Status) {} - - /** - * @brief Add vector array to table - * - * This method is used to add vector array to table. - * - * @param table_name, table_name is inserted. - * @param record_array, vector array is inserted. - * - * @return vector id array - */ - rpc Insert(InsertParam) returns (VectorIds) {} - - /** - * @brief Query vector - * - * This method is used to query vector in table. - * - * @param table_name, table_name is queried. - * @param query_record_array, all vector are going to be queried. - * @param query_range_array, optional ranges for conditional search. If not specified, search whole table - * @param topk, how many similarity vectors will be searched. - * - * @return query result array. - */ - rpc Search(SearchParam) returns (TopKQueryResultList) {} - - /** - * @brief Internal use query interface - * - * This method is used to query vector in specified files. - * - * @param file_id_array, specified files id array, queried. - * @param query_record_array, all vector are going to be queried. - * @param query_range_array, optional ranges for conditional search. If not specified, search whole table - * @param topk, how many similarity vectors will be searched. - * - * @return query result array. 
-     */
-    rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResultList) {}
-
-    /**
-     * @brief Get table schema
-     *
-     * This method is used to get table schema.
-     *
-     * @param table_name, target table name.
-     *
-     * @return table schema
+     * @return TableSchema
      */
    rpc DescribeTable(TableName) returns (TableSchema) {}

    /**
-     * @brief Get table schema
+     * @brief This method is used to get table row count.
      *
-     * This method is used to get table schema.
+     * @param TableName, target table name.
      *
-     * @param table_name, target table name.
-     *
-     * @return table schema
+     * @return TableRowCount
      */
    rpc CountTable(TableName) returns (TableRowCount) {}

    /**
-     * @brief List all tables in database
+     * @brief This method is used to list all tables.
      *
-     * This method is used to list all tables.
+     * @param Command, dummy parameter.
      *
-     *
-     * @return table names.
+     * @return TableNameList
      */
    rpc ShowTables(Command) returns (TableNameList) {}

    /**
-     * @brief Give the server status
+     * @brief This method is used to delete table.
      *
-     * This method is used to give the server status.
+     * @param TableName, table name is going to be deleted.
      *
-     * @return Server status.
+     * @return Status
+     */
+    rpc DropTable(TableName) returns (Status) {}
+
+    /**
+     * @brief This method is used to build index by table in sync mode.
+     *
+     * @param IndexParam, index parameters.
+     *
+     * @return Status
+     */
+    rpc CreateIndex(IndexParam) returns (Status) {}
+
+    /**
+     * @brief This method is used to describe index.
+     *
+     * @param TableName, target table name.
+     *
+     * @return IndexParam
+     */
+    rpc DescribeIndex(TableName) returns (IndexParam) {}
+
+    /**
+     * @brief This method is used to drop index.
+     *
+     * @param TableName, target table name.
+     *
+     * @return Status
+     */
+    rpc DropIndex(TableName) returns (Status) {}
+
+    /**
+     * @brief This method is used to create partition.
+     *
+     * @param PartitionParam, partition parameters.
+     *
+     * @return Status
+     */
+    rpc CreatePartition(PartitionParam) returns (Status) {}
+
+    /**
+     * @brief This method is used to show partition information.
+     *
+     * @param TableName, target table name.
+     *
+     * @return PartitionList
+     */
+    rpc ShowPartitions(TableName) returns (PartitionList) {}
+
+    /**
+     * @brief This method is used to drop partition.
+     *
+     * @param PartitionParam, target partition.
+     *
+     * @return Status
+     */
+    rpc DropPartition(PartitionParam) returns (Status) {}
+
+    /**
+     * @brief This method is used to add vector array to table.
+     *
+     * @param InsertParam, insert parameters.
+     *
+     * @return VectorIds
+     */
+    rpc Insert(InsertParam) returns (VectorIds) {}
+
+    /**
+     * @brief This method is used to query vector in table.
+     *
+     * @param SearchParam, search parameters.
+     *
+     * @return TopKQueryResultList
+     */
+    rpc Search(SearchParam) returns (TopKQueryResultList) {}
+
+    /**
+     * @brief This method is used to query vector in specified files.
+     *
+     * @param SearchInFilesParam, search-in-files parameters.
+     *
+     * @return TopKQueryResultList
+     */
+    rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResultList) {}
+
+    /**
+     * @brief This method is used to give the server status.
+     *
+     * @param Command, command string.
+     *
+     * @return StringReply
      */
    rpc Cmd(Command) returns (StringReply) {}

    /**
-     * @brief delete table by range
+     * @brief This method is used to delete vector by date range.
      *
-     * This method is used to delete vector by range
+     * @param DeleteByDateParam, delete parameters.
      *
-     * @return rpc status.
+     * @return Status
      */
-    rpc DeleteByRange(DeleteByRangeParam) returns (Status) {}
+    rpc DeleteByDate(DeleteByDateParam) returns (Status) {}

    /**
-     * @brief preload table
+     * @brief This method is used to preload table.
      *
-     * This method is used to preload table
+     * @param TableName, target table name.
      *
-     * @return Status.
+     * @return Status
      */
    rpc PreloadTable(TableName) returns (Status) {}
-
-    /**
-     * @brief describe index
-     *
-     * This method is used to describe index
-     *
-     * @return Status.
-     */
-    rpc DescribeIndex(TableName) returns (IndexParam) {}
-
-    /**
-     * @brief drop index
-     *
-     * This method is used to drop index
-     *
-     * @return Status.
-     */
-    rpc DropIndex(TableName) returns (Status) {}
-
}
diff --git a/core/src/scheduler/job/SearchJob.cpp b/core/src/scheduler/job/SearchJob.cpp
index 47c825c122..ec93c69f55 100644
--- a/core/src/scheduler/job/SearchJob.cpp
+++ b/core/src/scheduler/job/SearchJob.cpp
@@ -49,13 +49,21 @@ void
 SearchJob::SearchDone(size_t index_id) {
     std::unique_lock<std::mutex> lock(mutex_);
     index_files_.erase(index_id);
-    cv_.notify_all();
+    if (index_files_.empty()) {
+        cv_.notify_all();
+    }
+
+    SERVER_LOG_DEBUG << "SearchJob " << id() << " finish index file: " << index_id;
 }

-ResultSet&
-SearchJob::GetResult() {
-    return result_;
+ResultIds&
+SearchJob::GetResultIds() {
+    return result_ids_;
+}
+
+ResultDistances&
+SearchJob::GetResultDistances() {
+    return result_distances_;
 }

 Status&
diff --git a/core/src/scheduler/job/SearchJob.h b/core/src/scheduler/job/SearchJob.h
index 1e586090b9..ff5ab34131 100644
--- a/core/src/scheduler/job/SearchJob.h
+++ b/core/src/scheduler/job/SearchJob.h
@@ -29,6 +29,7 @@
 #include <vector>

 #include "Job.h"
+#include "db/Types.h"
 #include "db/meta/MetaTypes.h"

 namespace milvus {
@@ -37,9 +38,9 @@ namespace scheduler {
 using engine::meta::TableFileSchemaPtr;

 using Id2IndexMap = std::unordered_map<size_t, TableFileSchemaPtr>;
-using IdDistPair = std::pair<int64_t, double>;
-using Id2DistVec = std::vector<IdDistPair>;
-using ResultSet = std::vector<Id2DistVec>;
+
+using ResultIds = engine::ResultIds;
+using ResultDistances = engine::ResultDistances;

 class SearchJob : public Job {
  public:
@@ -55,8 +56,11 @@ class SearchJob : public Job {
     void
     SearchDone(size_t index_id);

-    ResultSet&
-    GetResult();
+    ResultIds&
+    GetResultIds();
+
+    ResultDistances&
+    GetResultDistances();

     Status&
     GetStatus();
@@ -90,6 +94,11 @@ class SearchJob : public Job {
         return index_files_;
     }

+    std::mutex&
+    mutex() {
+        return mutex_;
+    }
+
  private:
     uint64_t topk_ = 0;
     uint64_t nq_ = 0;
@@ -99,7 +108,8 @@ class SearchJob : public Job {
     Id2IndexMap index_files_;

     // TODO: column-base better ?
-    ResultSet result_;
+    ResultIds result_ids_;
+    ResultDistances result_distances_;

     Status status_;
     std::mutex mutex_;
diff --git a/core/src/scheduler/task/SearchTask.cpp b/core/src/scheduler/task/SearchTask.cpp
index 1bf1caff76..08bc6525aa 100644
--- a/core/src/scheduler/task/SearchTask.cpp
+++ b/core/src/scheduler/task/SearchTask.cpp
@@ -219,8 +219,11 @@ XSearchTask::Execute() {

         // step 3: pick up topk result
         auto spec_k = index_engine_->Count() < topk ? index_engine_->Count() : topk;
-        XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2,
-                                          search_job->GetResult());
+        {
+            std::unique_lock<std::mutex> lock(search_job->mutex());
+            XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2,
+                                              search_job->GetResultIds(), search_job->GetResultDistances());
+        }

         span = rc.RecordSection(hdr + ", reduce topk");
         // search_job->AccumReduceCost(span);
@@ -240,71 +243,69 @@
 }

 void
-XSearchTask::MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const std::vector<float>& input_distance,
-                                  uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending,
-                                  scheduler::ResultSet& result) {
-    if (result.empty()) {
-        result.resize(nq);
+XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances,
+                                  size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
+                                  scheduler::ResultDistances& tar_distances) {
+    if (src_ids.empty()) {
+        return;
     }

+    size_t tar_k = tar_ids.size() / nq;
+    size_t buf_k = std::min(topk, src_k + tar_k);
+
+    scheduler::ResultIds buf_ids(nq * buf_k, -1);
+    scheduler::ResultDistances buf_distances(nq * buf_k, 0.0);
+
     for (uint64_t i = 0; i < nq; i++) {
-        scheduler::Id2DistVec result_buf;
-        auto& result_i = result[i];
+        size_t buf_k_j = 0, src_k_j = 0, tar_k_j = 0;
+        size_t buf_idx, src_idx, tar_idx;

-        if (result[i].empty()) {
-            result_buf.resize(input_k, scheduler::IdDistPair(-1, 0.0));
-            uint64_t input_k_multi_i = topk * i;
-            for (auto k = 0; k < input_k; ++k) {
-                uint64_t idx = input_k_multi_i + k;
-                auto& result_buf_item = result_buf[k];
-                result_buf_item.first = input_ids[idx];
-                result_buf_item.second = input_distance[idx];
+        size_t buf_k_multi_i = buf_k * i;
+        size_t src_k_multi_i = topk * i;
+        size_t tar_k_multi_i = tar_k * i;
+
+        while (buf_k_j < buf_k && src_k_j < src_k && tar_k_j < tar_k) {
+            src_idx = src_k_multi_i + src_k_j;
+            tar_idx = tar_k_multi_i + tar_k_j;
+            buf_idx = buf_k_multi_i + buf_k_j;
+
+            if ((ascending && src_distances[src_idx] < tar_distances[tar_idx]) ||
+                (!ascending && src_distances[src_idx] > tar_distances[tar_idx])) {
+                buf_ids[buf_idx] = src_ids[src_idx];
+                buf_distances[buf_idx] = src_distances[src_idx];
+                src_k_j++;
+            } else {
+                buf_ids[buf_idx] = tar_ids[tar_idx];
+                buf_distances[buf_idx] = tar_distances[tar_idx];
+                tar_k_j++;
             }
-        } else {
-            size_t tar_size = result_i.size();
-            uint64_t output_k = std::min(topk, input_k + tar_size);
-            result_buf.resize(output_k, scheduler::IdDistPair(-1, 0.0));
-            size_t buf_k = 0, src_k = 0, tar_k = 0;
-            uint64_t src_idx;
-            uint64_t input_k_multi_i = topk * i;
-            while (buf_k < output_k && src_k < input_k && tar_k < tar_size) {
-                src_idx = input_k_multi_i + src_k;
-                auto& result_buf_item = result_buf[buf_k];
-                auto& result_item = result_i[tar_k];
-                if ((ascending && input_distance[src_idx] < result_item.second) ||
-                    (!ascending && input_distance[src_idx] > result_item.second)) {
-                    result_buf_item.first = input_ids[src_idx];
-                    result_buf_item.second = input_distance[src_idx];
-                    src_k++;
-                } else {
-                    result_buf_item = result_item;
-                    tar_k++;
+            buf_k_j++;
+        }
+
+        if (buf_k_j < buf_k) {
+            if (src_k_j < src_k) {
+                while (buf_k_j < buf_k && src_k_j < src_k) {
+                    buf_idx = buf_k_multi_i + buf_k_j;
+                    src_idx = src_k_multi_i + src_k_j;
+                    buf_ids[buf_idx] = src_ids[src_idx];
+                    buf_distances[buf_idx] = src_distances[src_idx];
+                    src_k_j++;
+                    buf_k_j++;
                 }
-                buf_k++;
-            }
-
-            if (buf_k < output_k) {
-                if (src_k < input_k) {
-                    while (buf_k < output_k && src_k < input_k) {
-                        src_idx = input_k_multi_i + src_k;
-                        auto& result_buf_item = result_buf[buf_k];
-                        result_buf_item.first = input_ids[src_idx];
-                        result_buf_item.second = input_distance[src_idx];
-                        src_k++;
-                        buf_k++;
-                    }
-                } else {
-                    while (buf_k < output_k && tar_k < tar_size) {
-                        result_buf[buf_k] = result_i[tar_k];
-                        tar_k++;
-                        buf_k++;
-                    }
+            } else {
+                while (buf_k_j < buf_k && tar_k_j < tar_k) {
+                    buf_idx = buf_k_multi_i + buf_k_j;
+                    tar_idx = tar_k_multi_i + tar_k_j;
+                    buf_ids[buf_idx] = tar_ids[tar_idx];
+                    buf_distances[buf_idx] = tar_distances[tar_idx];
+                    tar_k_j++;
+                    buf_k_j++;
                 }
             }
         }
-
-        result_i.swap(result_buf);
     }
+
+    tar_ids.swap(buf_ids);
+    tar_distances.swap(buf_distances);
 }

 // void
diff --git a/core/src/scheduler/task/SearchTask.h b/core/src/scheduler/task/SearchTask.h
index bbc8b5bd8f..bd51137341 100644
--- a/core/src/scheduler/task/SearchTask.h
+++ b/core/src/scheduler/task/SearchTask.h
@@ -39,8 +39,9 @@ class XSearchTask : public Task {
  public:
     static void
-    MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const std::vector<float>& input_distance,
-                         uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, scheduler::ResultSet& result);
+    MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances,
+                         size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
+                         scheduler::ResultDistances& tar_distances);

     //    static void
     //    MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,
diff --git a/core/src/sdk/examples/CMakeLists.txt b/core/src/sdk/examples/CMakeLists.txt
index aa15190178..a394f1ce9b 100644
--- a/core/src/sdk/examples/CMakeLists.txt
+++ b/core/src/sdk/examples/CMakeLists.txt
@@ -17,5 +17,7 @@
 # under the License.
 #-------------------------------------------------------------------------------

+aux_source_directory(${MILVUS_SOURCE_DIR}/src/sdk/examples/utils util_files)

-add_subdirectory(grpcsimple)
+add_subdirectory(simple)
+add_subdirectory(partition)
diff --git a/core/src/sdk/examples/grpcsimple/src/ClientTest.cpp b/core/src/sdk/examples/grpcsimple/src/ClientTest.cpp
deleted file mode 100644
index 069283200f..0000000000
--- a/core/src/sdk/examples/grpcsimple/src/ClientTest.cpp
+++ /dev/null
@@ -1,371 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
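The MergeTopkToResultSet rewrite above stores each query's top-k hits as flat id/distance arrays (nq contiguous segments) and folds every new batch in with a per-query two-pointer merge under the job mutex. A minimal self-contained sketch of that merge for a single query, using plain std::vector stand-ins rather than the scheduler::ResultIds/ResultDistances typedefs, and assuming ascending (L2-style) ordering:

#include <algorithm>
#include <cstdint>
#include <cstddef>
#include <iostream>
#include <vector>

// Merge one query's sorted top-k list `src` into the accumulated list `tar`,
// keeping at most `topk` entries; mirrors the per-query loop in
// XSearchTask::MergeTopkToResultSet.
static void MergeTopK(const std::vector<int64_t>& src_ids, const std::vector<float>& src_dist,
                      std::vector<int64_t>& tar_ids, std::vector<float>& tar_dist, std::size_t topk) {
    std::vector<int64_t> buf_ids;
    std::vector<float> buf_dist;
    std::size_t buf_k = std::min(topk, src_ids.size() + tar_ids.size());
    std::size_t s = 0, t = 0;
    while (buf_ids.size() < buf_k) {
        // Take from src when tar is exhausted or src has the smaller distance.
        if (t >= tar_ids.size() || (s < src_ids.size() && src_dist[s] < tar_dist[t])) {
            buf_ids.push_back(src_ids[s]);
            buf_dist.push_back(src_dist[s]);
            s++;
        } else {
            buf_ids.push_back(tar_ids[t]);
            buf_dist.push_back(tar_dist[t]);
            t++;
        }
    }
    tar_ids.swap(buf_ids);
    tar_dist.swap(buf_dist);
}

int main() {
    std::vector<int64_t> tar_ids = {1, 5};    // previously accumulated result
    std::vector<float> tar_dist = {0.1f, 0.4f};
    std::vector<int64_t> src_ids = {7, 9};    // new batch from one index file
    std::vector<float> src_dist = {0.2f, 0.3f};
    MergeTopK(src_ids, src_dist, tar_ids, tar_dist, 3);
    for (std::size_t i = 0; i < tar_ids.size(); ++i) {
        std::cout << tar_ids[i] << " : " << tar_dist[i] << std::endl;  // 1:0.1, 7:0.2, 9:0.3
    }
    return 0;
}

Keeping ids and distances in parallel flat buffers avoids the per-pair allocations of the old scheduler::ResultSet and lets the buffer be reused across index files.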
- -#include "sdk/examples/grpcsimple/src/ClientTest.h" -#include "MilvusApi.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -//#define SET_VECTOR_IDS; - -namespace { -const std::string& -GetTableName(); - -const char* TABLE_NAME = GetTableName().c_str(); -constexpr int64_t TABLE_DIMENSION = 512; -constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024; -constexpr int64_t BATCH_ROW_COUNT = 100000; -constexpr int64_t NQ = 5; -constexpr int64_t TOP_K = 10; -constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different -constexpr int64_t ADD_VECTOR_LOOP = 5; -constexpr int64_t SECONDS_EACH_HOUR = 3600; -constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; -constexpr int32_t N_LIST = 15000; - -#define BLOCK_SPLITER std::cout << "===========================================" << std::endl; - -void -PrintTableSchema(const milvus::TableSchema& tb_schema) { - BLOCK_SPLITER - std::cout << "Table name: " << tb_schema.table_name << std::endl; - std::cout << "Table dimension: " << tb_schema.dimension << std::endl; - BLOCK_SPLITER -} - -void -PrintSearchResult(const std::vector>& search_record_array, - const std::vector& topk_query_result_array) { - BLOCK_SPLITER - std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl; - - int32_t index = 0; - for (auto& result : topk_query_result_array) { - auto search_id = search_record_array[index].first; - index++; - std::cout << "No." << std::to_string(index) << " vector " << std::to_string(search_id) << " top " - << std::to_string(result.query_result_arrays.size()) << " search result:" << std::endl; - for (auto& item : result.query_result_arrays) { - std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance); - std::cout << std::endl; - } - } - - BLOCK_SPLITER -} - -std::string -CurrentTime() { - time_t tt; - time(&tt); - tt = tt + 8 * SECONDS_EACH_HOUR; - tm t; - gmtime_r(&tt, &t); - - std::string str = std::to_string(t.tm_year + 1900) + "_" + std::to_string(t.tm_mon + 1) + "_" + - std::to_string(t.tm_mday) + "_" + std::to_string(t.tm_hour) + "_" + std::to_string(t.tm_min) + - "_" + std::to_string(t.tm_sec); - - return str; -} - -std::string -CurrentTmDate(int64_t offset_day = 0) { - time_t tt; - time(&tt); - tt = tt + 8 * SECONDS_EACH_HOUR; - tt = tt + 24 * SECONDS_EACH_HOUR * offset_day; - tm t; - gmtime_r(&tt, &t); - - std::string str = - std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday); - - return str; -} - -const std::string& -GetTableName() { - static std::string s_id("tbl_" + CurrentTime()); - return s_id; -} - -milvus::TableSchema -BuildTableSchema() { - milvus::TableSchema tb_schema; - tb_schema.table_name = TABLE_NAME; - tb_schema.dimension = TABLE_DIMENSION; - tb_schema.index_file_size = TABLE_INDEX_FILE_SIZE; - tb_schema.metric_type = milvus::MetricType::L2; - - return tb_schema; -} - -void -BuildVectors(int64_t from, int64_t to, std::vector& vector_record_array) { - if (to <= from) { - return; - } - - vector_record_array.clear(); - for (int64_t k = from; k < to; k++) { - milvus::RowRecord record; - record.data.resize(TABLE_DIMENSION); - for (int64_t i = 0; i < TABLE_DIMENSION; i++) { - record.data[i] = (float)(k % (i + 1)); - } - - vector_record_array.emplace_back(record); - } -} - -void -Sleep(int seconds) { - std::cout << "Waiting " << seconds << " seconds ..." 
<< std::endl; - sleep(seconds); -} - -class TimeRecorder { - public: - explicit TimeRecorder(const std::string& title) : title_(title) { - start_ = std::chrono::system_clock::now(); - } - - ~TimeRecorder() { - std::chrono::system_clock::time_point end = std::chrono::system_clock::now(); - int64_t span = (std::chrono::duration_cast(end - start_)).count(); - std::cout << title_ << " totally cost: " << span << " ms" << std::endl; - } - - private: - std::string title_; - std::chrono::system_clock::time_point start_; -}; - -void -CheckResult(const std::vector>& search_record_array, - const std::vector& topk_query_result_array) { - BLOCK_SPLITER - int64_t index = 0; - for (auto& result : topk_query_result_array) { - auto result_id = result.query_result_arrays[0].id; - auto search_id = search_record_array[index++].first; - if (result_id != search_id) { - std::cout << "The top 1 result is wrong: " << result_id << " vs. " << search_id << std::endl; - } else { - std::cout << "Check result sucessfully" << std::endl; - } - } - BLOCK_SPLITER -} - -void -DoSearch(std::shared_ptr conn, - const std::vector>& search_record_array, const std::string& phase_name) { - std::vector query_range_array; - milvus::Range rg; - rg.start_value = CurrentTmDate(); - rg.end_value = CurrentTmDate(1); - query_range_array.emplace_back(rg); - - std::vector record_array; - for (auto& pair : search_record_array) { - record_array.push_back(pair.second); - } - - auto start = std::chrono::high_resolution_clock::now(); - std::vector topk_query_result_array; - { - TimeRecorder rc(phase_name); - milvus::Status stat = - conn->Search(TABLE_NAME, record_array, query_range_array, TOP_K, 32, topk_query_result_array); - std::cout << "SearchVector function call status: " << stat.message() << std::endl; - } - auto finish = std::chrono::high_resolution_clock::now(); - std::cout << "SEARCHVECTOR COST: " - << std::chrono::duration_cast>(finish - start).count() << "s\n"; - - PrintSearchResult(search_record_array, topk_query_result_array); - CheckResult(search_record_array, topk_query_result_array); -} -} // namespace - -void -ClientTest::Test(const std::string& address, const std::string& port) { - std::shared_ptr conn = milvus::Connection::Create(); - - { // connect server - milvus::ConnectParam param = {address, port}; - milvus::Status stat = conn->Connect(param); - std::cout << "Connect function call status: " << stat.message() << std::endl; - } - - { // server version - std::string version = conn->ServerVersion(); - std::cout << "Server version: " << version << std::endl; - } - - { // sdk version - std::string version = conn->ClientVersion(); - std::cout << "SDK version: " << version << std::endl; - } - - { - std::vector tables; - milvus::Status stat = conn->ShowTables(tables); - std::cout << "ShowTables function call status: " << stat.message() << std::endl; - std::cout << "All tables: " << std::endl; - for (auto& table : tables) { - int64_t row_count = 0; - // conn->DropTable(table); - stat = conn->CountTable(table, row_count); - std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl; - } - } - - { // create table - milvus::TableSchema tb_schema = BuildTableSchema(); - milvus::Status stat = conn->CreateTable(tb_schema); - std::cout << "CreateTable function call status: " << stat.message() << std::endl; - PrintTableSchema(tb_schema); - - bool has_table = conn->HasTable(tb_schema.table_name); - if (has_table) { - std::cout << "Table is created" << std::endl; - } - } - - { // describe table - milvus::TableSchema tb_schema; - 
milvus::Status stat = conn->DescribeTable(TABLE_NAME, tb_schema); - std::cout << "DescribeTable function call status: " << stat.message() << std::endl; - PrintTableSchema(tb_schema); - } - - std::vector> search_record_array; - { // insert vectors - for (int i = 0; i < ADD_VECTOR_LOOP; i++) { // add vectors - std::vector record_array; - int64_t begin_index = i * BATCH_ROW_COUNT; - BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array); - -#ifdef SET_VECTOR_IDS - record_ids.resize(ADD_VECTOR_LOOP * BATCH_ROW_COUNT); - for (auto j = begin_index; j < begin_index + BATCH_ROW_COUNT; j++) { - record_ids[i * BATCH_ROW_COUNT + j] = i * BATCH_ROW_COUNT + j; - } -#endif - - std::vector record_ids; - // generate user defined ids - for (int k = 0; k < BATCH_ROW_COUNT; k++) { - record_ids.push_back(i * BATCH_ROW_COUNT + k); - } - - auto start = std::chrono::high_resolution_clock::now(); - - milvus::Status stat = conn->Insert(TABLE_NAME, record_array, record_ids); - auto finish = std::chrono::high_resolution_clock::now(); - std::cout << "InsertVector cost: " - << std::chrono::duration_cast>(finish - start).count() << "s\n"; - - std::cout << "InsertVector function call status: " << stat.message() << std::endl; - std::cout << "Returned id array count: " << record_ids.size() << std::endl; - - if (search_record_array.size() < NQ) { - search_record_array.push_back(std::make_pair(record_ids[SEARCH_TARGET], record_array[SEARCH_TARGET])); - } - } - } - - { // search vectors without index - Sleep(2); - - int64_t row_count = 0; - milvus::Status stat = conn->CountTable(TABLE_NAME, row_count); - std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; - // DoSearch(conn, search_record_array, "Search without index"); - } - - { // wait unit build index finish - std::cout << "Wait until create all index done" << std::endl; - milvus::IndexParam index; - index.table_name = TABLE_NAME; - index.index_type = INDEX_TYPE; - index.nlist = N_LIST; - milvus::Status stat = conn->CreateIndex(index); - std::cout << "CreateIndex function call status: " << stat.message() << std::endl; - - milvus::IndexParam index2; - stat = conn->DescribeIndex(TABLE_NAME, index2); - std::cout << "DescribeIndex function call status: " << stat.message() << std::endl; - } - - { // preload table - milvus::Status stat = conn->PreloadTable(TABLE_NAME); - std::cout << "PreloadTable function call status: " << stat.message() << std::endl; - } - - { // search vectors after build index finish - for (uint64_t i = 0; i < 5; ++i) { - DoSearch(conn, search_record_array, "Search after build index finish"); - } - // std::cout << conn->DumpTaskTables() << std::endl; - } - - { // delete index - milvus::Status stat = conn->DropIndex(TABLE_NAME); - std::cout << "DropIndex function call status: " << stat.message() << std::endl; - - int64_t row_count = 0; - stat = conn->CountTable(TABLE_NAME, row_count); - std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; - } - - { // delete by range - milvus::Range rg; - rg.start_value = CurrentTmDate(-3); - rg.end_value = CurrentTmDate(-2); - - milvus::Status stat = conn->DeleteByRange(rg, TABLE_NAME); - std::cout << "DeleteByRange function call status: " << stat.message() << std::endl; - } - - { - // delete table - // Status stat = conn->DropTable(TABLE_NAME); - // std::cout << "DeleteTable function call status: " << stat.message() << std::endl; - } - - { // server status - std::string status = conn->ServerStatus(); - std::cout << "Server status before disconnect: " << status << 
std::endl; - } - milvus::Connection::Destroy(conn); - { // server status - std::string status = conn->ServerStatus(); - std::cout << "Server status after disconnect: " << status << std::endl; - } -} diff --git a/core/src/sdk/examples/partition/CMakeLists.txt b/core/src/sdk/examples/partition/CMakeLists.txt new file mode 100644 index 0000000000..dc5ea46a62 --- /dev/null +++ b/core/src/sdk/examples/partition/CMakeLists.txt @@ -0,0 +1,34 @@ +#------------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +#------------------------------------------------------------------------------- + + +aux_source_directory(src src_files) + +add_executable(sdk_partition + main.cpp + ${src_files} + ${util_files} + ) + +target_link_libraries(sdk_partition + milvus_sdk + pthread + ) + +install(TARGETS sdk_partition DESTINATION bin) diff --git a/core/src/sdk/examples/partition/main.cpp b/core/src/sdk/examples/partition/main.cpp new file mode 100644 index 0000000000..f0de9b1fc4 --- /dev/null +++ b/core/src/sdk/examples/partition/main.cpp @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
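Before the example sources, a sketch of what the new partition RPCs look like when called straight through the generated gRPC stub rather than the SDK wrapper. The channel target, table and partition names, and the generated header name are placeholders, and it assumes the Status message from status.proto exposes reason(); the stub type and CreatePartition signature follow from the service definition in milvus.proto above:

#include <grpcpp/grpcpp.h>
#include <iostream>
#include <memory>
#include "milvus.grpc.pb.h"  // assumed name of the header generated from milvus.proto

int main() {
    // Default example port; replace with the real server address.
    auto channel = grpc::CreateChannel("127.0.0.1:19530", grpc::InsecureChannelCredentials());
    std::unique_ptr<milvus::grpc::MilvusService::Stub> stub =
        milvus::grpc::MilvusService::NewStub(channel);

    milvus::grpc::PartitionParam param;  // message added by this patch
    param.set_table_name("example_table");
    param.set_partition_name("example_table_0");
    param.set_tag("0");

    milvus::grpc::Status response;
    grpc::ClientContext context;
    grpc::Status rpc_status = stub->CreatePartition(&context, param, &response);
    if (!rpc_status.ok()) {
        std::cerr << "rpc failed: " << rpc_status.error_message() << std::endl;
        return 1;
    }
    std::cout << "server reason: " << response.reason() << std::endl;
    return 0;
}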
+ +#include +#include +#include +#include + +#include "sdk/examples/partition/src/ClientTest.h" + +void +print_help(const std::string& app_name); + +int +main(int argc, char* argv[]) { + printf("Client start...\n"); + + std::string app_name = basename(argv[0]); + static struct option long_options[] = {{"server", optional_argument, nullptr, 's'}, + {"port", optional_argument, nullptr, 'p'}, + {"help", no_argument, nullptr, 'h'}, + {nullptr, 0, nullptr, 0}}; + + int option_index = 0; + std::string address = "127.0.0.1", port = "19530"; + app_name = argv[0]; + + int value; + while ((value = getopt_long(argc, argv, "s:p:h", long_options, &option_index)) != -1) { + switch (value) { + case 's': { + char* address_ptr = strdup(optarg); + address = address_ptr; + free(address_ptr); + break; + } + case 'p': { + char* port_ptr = strdup(optarg); + port = port_ptr; + free(port_ptr); + break; + } + case 'h': + default: + print_help(app_name); + return EXIT_SUCCESS; + } + } + + ClientTest test; + test.Test(address, port); + + printf("Client stop...\n"); + return 0; +} + +void +print_help(const std::string& app_name) { + printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str()); + printf(" Options:\n"); + printf(" -s --server Server address, default 127.0.0.1\n"); + printf(" -p --port Server port, default 19530\n"); + printf(" -h --help Print help information\n"); + printf("\n"); +} diff --git a/core/src/sdk/examples/partition/src/ClientTest.cpp b/core/src/sdk/examples/partition/src/ClientTest.cpp new file mode 100644 index 0000000000..6e4a7d1826 --- /dev/null +++ b/core/src/sdk/examples/partition/src/ClientTest.cpp @@ -0,0 +1,205 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
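The partition test below exercises three tag forms: the exact tag, a non-matching tag, and the pattern "\\d". Server-side, partition tags are matched as patterns, so a single digit-class pattern covers all five numeric tags the test creates. A standalone illustration of that matching idea (std::regex here only approximates the server's matcher):

#include <iostream>
#include <regex>
#include <string>
#include <vector>

int main() {
    // Tags created by the partition example: "0" .. "4".
    std::vector<std::string> partition_tags = {"0", "1", "2", "3", "4"};
    std::regex pattern("\\d");  // the regex tag used by the test below
    for (const auto& tag : partition_tags) {
        bool hit = std::regex_match(tag, pattern);
        std::cout << "tag " << tag << (hit ? " matches" : " does not match") << std::endl;
    }
    return 0;
}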
+ +#include "sdk/examples/partition/src/ClientTest.h" +#include "MilvusApi.h" +#include "sdk/examples/utils/Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace { + +const char* TABLE_NAME = milvus_sdk::Utils::GenTableName().c_str(); + +constexpr int64_t TABLE_DIMENSION = 512; +constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024; +constexpr milvus::MetricType TABLE_METRIC_TYPE = milvus::MetricType::L2; +constexpr int64_t BATCH_ROW_COUNT = 10000; +constexpr int64_t NQ = 5; +constexpr int64_t TOP_K = 10; +constexpr int64_t NPROBE = 32; +constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different +constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; +constexpr int32_t N_LIST = 15000; +constexpr int32_t PARTITION_COUNT = 5; +constexpr int32_t TARGET_PARTITION = 3; + +milvus::TableSchema +BuildTableSchema() { + milvus::TableSchema tb_schema = {TABLE_NAME, TABLE_DIMENSION, TABLE_INDEX_FILE_SIZE, TABLE_METRIC_TYPE}; + return tb_schema; +} + +milvus::PartitionParam +BuildPartitionParam(int32_t index) { + std::string tag = std::to_string(index); + std::string partition_name = std::string(TABLE_NAME) + "_" + tag; + milvus::PartitionParam partition_param = {TABLE_NAME, partition_name, tag}; + return partition_param; +} + +milvus::IndexParam +BuildIndexParam() { + milvus::IndexParam index_param = {TABLE_NAME, INDEX_TYPE, N_LIST}; + return index_param; +} + +} // namespace + +void +ClientTest::Test(const std::string& address, const std::string& port) { + std::shared_ptr conn = milvus::Connection::Create(); + + milvus::Status stat; + { // connect server + milvus::ConnectParam param = {address, port}; + stat = conn->Connect(param); + std::cout << "Connect function call status: " << stat.message() << std::endl; + } + + { // create table + milvus::TableSchema tb_schema = BuildTableSchema(); + stat = conn->CreateTable(tb_schema); + std::cout << "CreateTable function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintTableSchema(tb_schema); + } + + { // create partition + for (int32_t i = 0; i < PARTITION_COUNT; i++) { + milvus::PartitionParam partition_param = BuildPartitionParam(i); + stat = conn->CreatePartition(partition_param); + std::cout << "CreatePartition function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintPartitionParam(partition_param); + } + } + + { // insert vectors + milvus_sdk::TimeRecorder rc("All vectors"); + for (int i = 0; i < PARTITION_COUNT * 5; i++) { + std::vector record_array; + std::vector record_ids; + int64_t begin_index = i * BATCH_ROW_COUNT; + { // generate vectors + milvus_sdk::TimeRecorder rc("Build vectors No." + std::to_string(i)); + milvus_sdk::Utils::BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array, record_ids, + TABLE_DIMENSION); + } + + std::string title = "Insert " + std::to_string(record_array.size()) + " vectors No." 

+ std::to_string(i);
+            milvus_sdk::TimeRecorder rc(title);
+            stat = conn->Insert(TABLE_NAME, std::to_string(i % PARTITION_COUNT), record_array, record_ids);
+        }
+    }
+
+    std::vector<std::pair<int64_t, milvus::RowRecord>> search_record_array;
+    {  // build search vectors
+        std::vector<milvus::RowRecord> record_array;
+        std::vector<int64_t> record_ids;
+        int64_t index = TARGET_PARTITION * BATCH_ROW_COUNT + SEARCH_TARGET;
+        milvus_sdk::Utils::BuildVectors(index, index + 1, record_array, record_ids, TABLE_DIMENSION);
+        search_record_array.push_back(std::make_pair(record_ids[0], record_array[0]));
+    }
+
+    milvus_sdk::Utils::Sleep(3);
+
+    {  // table row count
+        int64_t row_count = 0;
+        stat = conn->CountTable(TABLE_NAME, row_count);
+        std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
+    }
+
+    {  // search vectors
+        std::cout << "Search in correct partition" << std::endl;
+        std::vector<std::string> partition_tags = {std::to_string(TARGET_PARTITION)};
+        std::vector<milvus::TopKQueryResult> topk_query_result_array;
+        milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partition_tags, TOP_K, NPROBE, search_record_array,
+                                    topk_query_result_array);
+
+        std::cout << "Search in wrong partition" << std::endl;
+        partition_tags = {"0"};
+        milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partition_tags, TOP_K, NPROBE, search_record_array,
+                                    topk_query_result_array);
+
+        std::cout << "Search by regex matched partition tag" << std::endl;
+        partition_tags = {"\\d"};
+        milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partition_tags, TOP_K, NPROBE, search_record_array,
+                                    topk_query_result_array);
+    }
+
+    {  // wait until build index finish
+        std::cout << "Wait until create all index done" << std::endl;
+        milvus::IndexParam index1 = BuildIndexParam();
+        milvus_sdk::Utils::PrintIndexParam(index1);
+        stat = conn->CreateIndex(index1);
+        std::cout << "CreateIndex function call status: " << stat.message() << std::endl;
+
+        milvus::IndexParam index2;
+        stat = conn->DescribeIndex(TABLE_NAME, index2);
+        std::cout << "DescribeIndex function call status: " << stat.message() << std::endl;
+        milvus_sdk::Utils::PrintIndexParam(index2);
+    }
+
+    {  // table row count
+        int64_t row_count = 0;
+        stat = conn->CountTable(TABLE_NAME, row_count);
+        std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
+    }
+
+    {  // drop partition
+        milvus::PartitionParam param1 = {TABLE_NAME, "", std::to_string(TARGET_PARTITION)};
+        milvus_sdk::Utils::PrintPartitionParam(param1);
+        stat = conn->DropPartition(param1);
+        std::cout << "DropPartition function call status: " << stat.message() << std::endl;
+    }
+
+    {  // table row count
+        int64_t row_count = 0;
+        stat = conn->CountTable(TABLE_NAME, row_count);
+        std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
+    }
+
+    {  // search vectors
+        std::cout << "Search in whole table" << std::endl;
+        std::vector<std::string> partition_tags;
+        std::vector<milvus::TopKQueryResult> topk_query_result_array;
+        milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partition_tags, TOP_K, NPROBE, search_record_array,
+                                    topk_query_result_array);
+    }
+
+    {  // drop index
+        stat = conn->DropIndex(TABLE_NAME);
+        std::cout << "DropIndex function call status: " << stat.message() << std::endl;
+
+        int64_t row_count = 0;
+        stat = conn->CountTable(TABLE_NAME, row_count);
+        std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
+    }
+
+    {  // drop table
+        stat = conn->DropTable(TABLE_NAME);
+        std::cout << "DropTable function call status: " << stat.message() << std::endl;
+    }
+
+    milvus::Connection::Destroy(conn);
+}
diff --git a/core/src/sdk/examples/grpcsimple/src/ClientTest.h b/core/src/sdk/examples/partition/src/ClientTest.h
similarity
index 100% rename from core/src/sdk/examples/grpcsimple/src/ClientTest.h rename to core/src/sdk/examples/partition/src/ClientTest.h diff --git a/core/src/sdk/examples/grpcsimple/CMakeLists.txt b/core/src/sdk/examples/simple/CMakeLists.txt similarity index 98% rename from core/src/sdk/examples/grpcsimple/CMakeLists.txt rename to core/src/sdk/examples/simple/CMakeLists.txt index 77542ed2a7..82680e31be 100644 --- a/core/src/sdk/examples/grpcsimple/CMakeLists.txt +++ b/core/src/sdk/examples/simple/CMakeLists.txt @@ -17,12 +17,12 @@ # under the License. #------------------------------------------------------------------------------- - aux_source_directory(src src_files) add_executable(sdk_simple main.cpp ${src_files} + ${util_files} ) target_link_libraries(sdk_simple diff --git a/core/src/sdk/examples/grpcsimple/main.cpp b/core/src/sdk/examples/simple/main.cpp similarity index 98% rename from core/src/sdk/examples/grpcsimple/main.cpp rename to core/src/sdk/examples/simple/main.cpp index c31f491afb..c08741606c 100644 --- a/core/src/sdk/examples/grpcsimple/main.cpp +++ b/core/src/sdk/examples/simple/main.cpp @@ -20,7 +20,7 @@ #include #include -#include "src/ClientTest.h" +#include "sdk/examples/simple/src/ClientTest.h" void print_help(const std::string& app_name); diff --git a/core/src/sdk/examples/simple/src/ClientTest.cpp b/core/src/sdk/examples/simple/src/ClientTest.cpp new file mode 100644 index 0000000000..9045168f2a --- /dev/null +++ b/core/src/sdk/examples/simple/src/ClientTest.cpp @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
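The simple test below generates its input through milvus_sdk::Utils::BuildVectors, which replaces the per-example generator deleted above. A standalone equivalent under the same deterministic fill (element i of row k is k % (i + 1)), with a local RowRecord stand-in and a small dimension for brevity:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

struct RowRecord {  // stand-in for milvus::RowRecord
    std::vector<float> data;
};

// Deterministic vector generator matching the pattern used by the examples:
// element i of row k is k % (i + 1), so runs are reproducible across tests.
static void BuildVectors(int64_t from, int64_t to, int64_t dim,
                         std::vector<RowRecord>& records, std::vector<int64_t>& ids) {
    records.clear();
    ids.clear();
    for (int64_t k = from; k < to; k++) {
        RowRecord record;
        record.data.resize(dim);
        for (int64_t i = 0; i < dim; i++) {
            record.data[i] = static_cast<float>(k % (i + 1));
        }
        records.emplace_back(std::move(record));
        ids.push_back(k);  // user-defined ids, as in the examples
    }
}

int main() {
    std::vector<RowRecord> records;
    std::vector<int64_t> ids;
    BuildVectors(5000, 5003, 8, records, ids);
    for (std::size_t r = 0; r < records.size(); ++r) {
        std::cout << "id " << ids[r] << ":";
        for (float v : records[r].data) {
            std::cout << " " << v;
        }
        std::cout << std::endl;
    }
    return 0;
}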
+ +#include "sdk/examples/simple/src/ClientTest.h" +#include "MilvusApi.h" +#include "sdk/examples/utils/TimeRecorder.h" +#include "sdk/examples/utils/Utils.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +const char* TABLE_NAME = milvus_sdk::Utils::GenTableName().c_str(); + +constexpr int64_t TABLE_DIMENSION = 512; +constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024; +constexpr milvus::MetricType TABLE_METRIC_TYPE = milvus::MetricType::L2; +constexpr int64_t BATCH_ROW_COUNT = 100000; +constexpr int64_t NQ = 5; +constexpr int64_t TOP_K = 10; +constexpr int64_t NPROBE = 32; +constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different +constexpr int64_t ADD_VECTOR_LOOP = 5; +constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; +constexpr int32_t N_LIST = 15000; + +milvus::TableSchema +BuildTableSchema() { + milvus::TableSchema tb_schema = {TABLE_NAME, TABLE_DIMENSION, TABLE_INDEX_FILE_SIZE, TABLE_METRIC_TYPE}; + return tb_schema; +} + +milvus::IndexParam +BuildIndexParam() { + milvus::IndexParam index_param = {TABLE_NAME, INDEX_TYPE, N_LIST}; + return index_param; +} + +} // namespace + +void +ClientTest::Test(const std::string& address, const std::string& port) { + std::shared_ptr conn = milvus::Connection::Create(); + + milvus::Status stat; + { // connect server + milvus::ConnectParam param = {address, port}; + stat = conn->Connect(param); + std::cout << "Connect function call status: " << stat.message() << std::endl; + } + + { // server version + std::string version = conn->ServerVersion(); + std::cout << "Server version: " << version << std::endl; + } + + { // sdk version + std::string version = conn->ClientVersion(); + std::cout << "SDK version: " << version << std::endl; + } + + { // show tables + std::vector tables; + stat = conn->ShowTables(tables); + std::cout << "ShowTables function call status: " << stat.message() << std::endl; + std::cout << "All tables: " << std::endl; + for (auto& table : tables) { + int64_t row_count = 0; + // conn->DropTable(table); + stat = conn->CountTable(table, row_count); + std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl; + } + } + + { // create table + milvus::TableSchema tb_schema = BuildTableSchema(); + stat = conn->CreateTable(tb_schema); + std::cout << "CreateTable function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintTableSchema(tb_schema); + + bool has_table = conn->HasTable(tb_schema.table_name); + if (has_table) { + std::cout << "Table is created" << std::endl; + } + } + + { // describe table + milvus::TableSchema tb_schema; + stat = conn->DescribeTable(TABLE_NAME, tb_schema); + std::cout << "DescribeTable function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintTableSchema(tb_schema); + } + + { // insert vectors + for (int i = 0; i < ADD_VECTOR_LOOP; i++) { + std::vector record_array; + std::vector record_ids; + int64_t begin_index = i * BATCH_ROW_COUNT; + { // generate vectors + milvus_sdk::TimeRecorder rc("Build vectors No." + std::to_string(i)); + milvus_sdk::Utils::BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array, record_ids, + TABLE_DIMENSION); + } + + std::string title = "Insert " + std::to_string(record_array.size()) + " vectors No." 
+ std::to_string(i); + milvus_sdk::TimeRecorder rc(title); + stat = conn->Insert(TABLE_NAME, "", record_array, record_ids); + std::cout << "InsertVector function call status: " << stat.message() << std::endl; + std::cout << "Returned id array count: " << record_ids.size() << std::endl; + } + } + + std::vector<std::pair<int64_t, milvus::RowRecord>> search_record_array; + { // build search vectors + for (int64_t i = 0; i < NQ; i++) { + std::vector<milvus::RowRecord> record_array; + std::vector<int64_t> record_ids; + int64_t index = i * BATCH_ROW_COUNT + SEARCH_TARGET; + milvus_sdk::Utils::BuildVectors(index, index + 1, record_array, record_ids, TABLE_DIMENSION); + search_record_array.push_back(std::make_pair(record_ids[0], record_array[0])); + } + } + + milvus_sdk::Utils::Sleep(3); + { // search vectors + std::vector<std::string> partiton_tags; + std::vector<milvus::TopKQueryResult> topk_query_result_array; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + } + + { // wait until build index finish + std::cout << "Wait until create all index done" << std::endl; + milvus::IndexParam index1 = BuildIndexParam(); + milvus_sdk::Utils::PrintIndexParam(index1); + stat = conn->CreateIndex(index1); + std::cout << "CreateIndex function call status: " << stat.message() << std::endl; + + milvus::IndexParam index2; + stat = conn->DescribeIndex(TABLE_NAME, index2); + std::cout << "DescribeIndex function call status: " << stat.message() << std::endl; + milvus_sdk::Utils::PrintIndexParam(index2); + } + + { // preload table + stat = conn->PreloadTable(TABLE_NAME); + std::cout << "PreloadTable function call status: " << stat.message() << std::endl; + } + + { // search vectors + std::vector<std::string> partiton_tags; + std::vector<milvus::TopKQueryResult> topk_query_result_array; + milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array, + topk_query_result_array); + } + + { // drop index + stat = conn->DropIndex(TABLE_NAME); + std::cout << "DropIndex function call status: " << stat.message() << std::endl; + + int64_t row_count = 0; + stat = conn->CountTable(TABLE_NAME, row_count); + std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl; + } + + { // delete by date + milvus::Range rg; + rg.start_value = milvus_sdk::Utils::CurrentTmDate(-3); + rg.end_value = milvus_sdk::Utils::CurrentTmDate(-2); + + stat = conn->DeleteByDate(TABLE_NAME, rg); + std::cout << "DeleteByDate function call status: " << stat.message() << std::endl; + } + + { // drop table + stat = conn->DropTable(TABLE_NAME); + std::cout << "DropTable function call status: " << stat.message() << std::endl; + } + + { // server status + std::string status = conn->ServerStatus(); + std::cout << "Server status before disconnect: " << status << std::endl; + } + milvus::Connection::Destroy(conn); + { // server status + std::string status = conn->ServerStatus(); + std::cout << "Server status after disconnect: " << status << std::endl; + } +} diff --git a/core/src/sdk/examples/simple/src/ClientTest.h b/core/src/sdk/examples/simple/src/ClientTest.h new file mode 100644 index 0000000000..b028b63f44 --- /dev/null +++ b/core/src/sdk/examples/simple/src/ClientTest.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +class ClientTest { + public: + void + Test(const std::string& address, const std::string& port); +}; diff --git a/core/src/sdk/examples/utils/TimeRecorder.cpp b/core/src/sdk/examples/utils/TimeRecorder.cpp new file mode 100644 index 0000000000..cdf9eda5ec --- /dev/null +++ b/core/src/sdk/examples/utils/TimeRecorder.cpp @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "sdk/examples/utils/TimeRecorder.h" + +#include + +namespace milvus_sdk { + +TimeRecorder::TimeRecorder(const std::string& title) : title_(title) { + start_ = std::chrono::system_clock::now(); + std::cout << title_ << " begin..." << std::endl; +} + +TimeRecorder::~TimeRecorder() { + std::chrono::system_clock::time_point end = std::chrono::system_clock::now(); + int64_t span = (std::chrono::duration_cast(end - start_)).count(); + std::cout << title_ << " totally cost: " << span << " ms" << std::endl; +} + +} // namespace milvus_sdk diff --git a/core/src/sdk/examples/utils/TimeRecorder.h b/core/src/sdk/examples/utils/TimeRecorder.h new file mode 100644 index 0000000000..edfb9d2679 --- /dev/null +++ b/core/src/sdk/examples/utils/TimeRecorder.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
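TimeRecorder above is a small RAII scope timer: construction prints "<title> begin..." and the destructor prints the elapsed milliseconds, so timing any block only requires declaring one on the stack. A minimal usage sketch (the title string and the work inside the scope are placeholders, not part of the patch):

#include <vector>
#include "sdk/examples/utils/TimeRecorder.h"

void TimedInsert() {
    milvus_sdk::TimeRecorder rc("insert batch");    // prints "insert batch begin..."
    std::vector<float> payload(1024 * 1024, 0.5f);  // stand-in for the real work being timed
}  // destructor fires here and prints "insert batch totally cost: <n> ms"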
+ +#pragma once + +#include +#include + +namespace milvus_sdk { + +class TimeRecorder { + public: + explicit TimeRecorder(const std::string& title); + + ~TimeRecorder(); + + private: + std::string title_; + std::chrono::system_clock::time_point start_; +}; + +} // namespace milvus_sdk diff --git a/core/src/sdk/examples/utils/Utils.cpp b/core/src/sdk/examples/utils/Utils.cpp new file mode 100644 index 0000000000..c527cf47e1 --- /dev/null +++ b/core/src/sdk/examples/utils/Utils.cpp @@ -0,0 +1,223 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "sdk/examples/utils/Utils.h" +#include "sdk/examples/utils/TimeRecorder.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace milvus_sdk { + +constexpr int64_t SECONDS_EACH_HOUR = 3600; + +#define BLOCK_SPLITER std::cout << "===========================================" << std::endl; + +std::string +Utils::CurrentTime() { + time_t tt; + time(&tt); + tt = tt + 8 * SECONDS_EACH_HOUR; + tm t; + gmtime_r(&tt, &t); + + std::string str = std::to_string(t.tm_year + 1900) + "_" + std::to_string(t.tm_mon + 1) + "_" + + std::to_string(t.tm_mday) + "_" + std::to_string(t.tm_hour) + "_" + std::to_string(t.tm_min) + + "_" + std::to_string(t.tm_sec); + + return str; +} + +std::string +Utils::CurrentTmDate(int64_t offset_day) { + time_t tt; + time(&tt); + tt = tt + 8 * SECONDS_EACH_HOUR; + tt = tt + 24 * SECONDS_EACH_HOUR * offset_day; + tm t; + gmtime_r(&tt, &t); + + std::string str = + std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday); + + return str; +} + +void +Utils::Sleep(int seconds) { + std::cout << "Waiting " << seconds << " seconds ..." 
<< std::endl; + sleep(seconds); +} + +const std::string& +Utils::GenTableName() { + static std::string s_id("tbl_" + CurrentTime()); + return s_id; +} + +std::string +Utils::MetricTypeName(const milvus::MetricType& metric_type) { + switch (metric_type) { + case milvus::MetricType::L2: + return "L2 distance"; + case milvus::MetricType::IP: + return "Inner product"; + default: + return "Unknown metric type"; + } +} + +std::string +Utils::IndexTypeName(const milvus::IndexType& index_type) { + switch (index_type) { + case milvus::IndexType::cpu_idmap: + return "cpu idmap"; + case milvus::IndexType::gpu_ivfflat: + return "gpu ivfflat"; + case milvus::IndexType::gpu_ivfsq8: + return "gpu ivfsq8"; + case milvus::IndexType::mix_nsg: + return "mix nsg"; + default: + return "Unknown index type"; + } +} + +void +Utils::PrintTableSchema(const milvus::TableSchema& tb_schema) { + BLOCK_SPLITER + std::cout << "Table name: " << tb_schema.table_name << std::endl; + std::cout << "Table dimension: " << tb_schema.dimension << std::endl; + std::cout << "Table index file size: " << tb_schema.index_file_size << std::endl; + std::cout << "Table metric type: " << MetricTypeName(tb_schema.metric_type) << std::endl; + BLOCK_SPLITER +} + +void +Utils::PrintPartitionParam(const milvus::PartitionParam& partition_param) { + BLOCK_SPLITER + std::cout << "Table name: " << partition_param.table_name << std::endl; + std::cout << "Partition name: " << partition_param.partition_name << std::endl; + std::cout << "Partition tag: " << partition_param.partition_tag << std::endl; + BLOCK_SPLITER +} + +void +Utils::PrintIndexParam(const milvus::IndexParam& index_param) { + BLOCK_SPLITER + std::cout << "Index table name: " << index_param.table_name << std::endl; + std::cout << "Index type: " << IndexTypeName(index_param.index_type) << std::endl; + std::cout << "Index nlist: " << index_param.nlist << std::endl; + BLOCK_SPLITER +} + +void +Utils::BuildVectors(int64_t from, int64_t to, std::vector<milvus::RowRecord>& vector_record_array, + std::vector<int64_t>& record_ids, int64_t dimension) { + if (to <= from) { + return; + } + + vector_record_array.clear(); + record_ids.clear(); + for (int64_t k = from; k < to; k++) { + milvus::RowRecord record; + record.data.resize(dimension); + for (int64_t i = 0; i < dimension; i++) { + record.data[i] = (float)(k % (i + 1)); + } + + vector_record_array.emplace_back(record); + record_ids.push_back(k); + } +} + +void +Utils::PrintSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array, + const std::vector<milvus::TopKQueryResult>& topk_query_result_array) { + BLOCK_SPLITER + std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl; + + int32_t index = 0; + for (auto& result : topk_query_result_array) { + auto search_id = search_record_array[index].first; + index++; + std::cout << "No."
<< std::to_string(index) << " vector " << std::to_string(search_id) << " top " + << std::to_string(result.query_result_arrays.size()) << " search result:" << std::endl; + for (auto& item : result.query_result_arrays) { + std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance); + std::cout << std::endl; + } + } + + BLOCK_SPLITER +} + +void +Utils::CheckSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array, + const std::vector<milvus::TopKQueryResult>& topk_query_result_array) { + BLOCK_SPLITER + int64_t index = 0; + for (auto& result : topk_query_result_array) { + auto result_id = result.query_result_arrays[0].id; + auto search_id = search_record_array[index++].first; + if (result_id != search_id) { + std::cout << "The top 1 result is wrong: " << result_id << " vs. " << search_id << std::endl; + } else { + std::cout << "Check result successfully" << std::endl; + } + } + BLOCK_SPLITER +} + +void +Utils::DoSearch(std::shared_ptr<milvus::Connection> conn, const std::string& table_name, + const std::vector<std::string>& partiton_tags, int64_t top_k, int64_t nprobe, + const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array, + std::vector<milvus::TopKQueryResult>& topk_query_result_array) { + topk_query_result_array.clear(); + + std::vector<milvus::Range> query_range_array; + milvus::Range rg; + rg.start_value = CurrentTmDate(); + rg.end_value = CurrentTmDate(1); + query_range_array.emplace_back(rg); + + std::vector<milvus::RowRecord> record_array; + for (auto& pair : search_record_array) { + record_array.push_back(pair.second); + } + + { + BLOCK_SPLITER + milvus_sdk::TimeRecorder rc("search"); + milvus::Status stat = conn->Search(table_name, partiton_tags, record_array, query_range_array, top_k, nprobe, + topk_query_result_array); + std::cout << "SearchVector function call status: " << stat.message() << std::endl; + BLOCK_SPLITER + } + + PrintSearchResult(search_record_array, topk_query_result_array); + CheckSearchResult(search_record_array, topk_query_result_array); +} + +} // namespace milvus_sdk diff --git a/core/src/sdk/examples/utils/Utils.h b/core/src/sdk/examples/utils/Utils.h new file mode 100644 index 0000000000..cab0d8810a --- /dev/null +++ b/core/src/sdk/examples/utils/Utils.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
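Taken together, BuildVectors and DoSearch cover the whole query path of these examples: BuildVectors deterministically fills RowRecords (component i of vector k is k % (i + 1), so the same index always regenerates the same vector), and DoSearch wraps Connection::Search with timing, result printing, and a top-1 sanity check. A sketch of driving them directly, assuming `conn` is an already-connected client and "tbl" an existing 512-dimensional table (both placeholders):

std::vector<std::pair<int64_t, milvus::RowRecord>> targets;
for (int64_t i = 0; i < 5; i++) {
    std::vector<milvus::RowRecord> records;
    std::vector<int64_t> ids;
    // regenerate exactly one query vector per iteration, dim 512
    milvus_sdk::Utils::BuildVectors(i * 1000, i * 1000 + 1, records, ids, 512);
    targets.emplace_back(ids[0], records[0]);
}
std::vector<std::string> tags;                 // empty tag list searches the whole table
std::vector<milvus::TopKQueryResult> results;
milvus_sdk::Utils::DoSearch(conn, "tbl", tags, 10, 32, targets, results);  // top_k=10, nprobe=32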
+ +#pragma once + +#include "MilvusApi.h" + +#include +#include +#include +#include + +namespace milvus_sdk { + +class Utils { + public: + static std::string + CurrentTime(); + + static std::string + CurrentTmDate(int64_t offset_day = 0); + + static const std::string& + GenTableName(); + + static void + Sleep(int seconds); + + static std::string + MetricTypeName(const milvus::MetricType& metric_type); + + static std::string + IndexTypeName(const milvus::IndexType& index_type); + + static void + PrintTableSchema(const milvus::TableSchema& tb_schema); + + static void + PrintPartitionParam(const milvus::PartitionParam& partition_param); + + static void + PrintIndexParam(const milvus::IndexParam& index_param); + + static void + BuildVectors(int64_t from, int64_t to, std::vector& vector_record_array, + std::vector& record_ids, int64_t dimension); + + static void + PrintSearchResult(const std::vector>& search_record_array, + const std::vector& topk_query_result_array); + + static void + CheckSearchResult(const std::vector>& search_record_array, + const std::vector& topk_query_result_array); + + static void + DoSearch(std::shared_ptr conn, const std::string& table_name, + const std::vector& partiton_tags, int64_t top_k, int64_t nprobe, + const std::vector>& search_record_array, + std::vector& topk_query_result_array); +}; + +} // namespace milvus_sdk diff --git a/core/src/sdk/grpc/ClientProxy.cpp b/core/src/sdk/grpc/ClientProxy.cpp index 91a11adf8c..3321a9f85e 100644 --- a/core/src/sdk/grpc/ClientProxy.cpp +++ b/core/src/sdk/grpc/ClientProxy.cpp @@ -138,8 +138,8 @@ ClientProxy::CreateIndex(const IndexParam& index_param) { } Status -ClientProxy::Insert(const std::string& table_name, const std::vector& record_array, - std::vector& id_array) { +ClientProxy::Insert(const std::string& table_name, const std::string& partition_tag, + const std::vector& record_array, std::vector& id_array) { Status status = Status::OK(); try { //////////////////////////////////////////////////////////////////////////// @@ -185,6 +185,7 @@ ClientProxy::Insert(const std::string& table_name, const std::vector& #else ::milvus::grpc::InsertParam insert_param; insert_param.set_table_name(table_name); + insert_param.set_partition_tag(partition_tag); for (auto& record : record_array) { ::milvus::grpc::RowRecord* grpc_record = insert_param.add_row_record_array(); @@ -215,15 +216,18 @@ ClientProxy::Insert(const std::string& table_name, const std::vector& } Status -ClientProxy::Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) { +ClientProxy::Search(const std::string& table_name, const std::vector& partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, + int64_t topk, int64_t nprobe, std::vector& topk_query_result_array) { try { // step 1: convert vectors data ::milvus::grpc::SearchParam search_param; search_param.set_table_name(table_name); search_param.set_topk(topk); search_param.set_nprobe(nprobe); + for (auto& tag : partiton_tags) { + search_param.add_partition_tag_array(tag); + } for (auto& record : query_record_array) { ::milvus::grpc::RowRecord* row_record = search_param.add_query_record_array(); for (auto& rec : record.data) { @@ -349,13 +353,13 @@ ClientProxy::DumpTaskTables() const { } Status -ClientProxy::DeleteByRange(milvus::Range& range, const std::string& table_name) { +ClientProxy::DeleteByDate(const std::string& table_name, const 
milvus::Range& range) { try { - ::milvus::grpc::DeleteByRangeParam delete_by_range_param; + ::milvus::grpc::DeleteByDateParam delete_by_range_param; delete_by_range_param.set_table_name(table_name); delete_by_range_param.mutable_range()->set_start_value(range.start_value); delete_by_range_param.mutable_range()->set_end_value(range.end_value); - return client_ptr_->DeleteByRange(delete_by_range_param); + return client_ptr_->DeleteByDate(delete_by_range_param); } catch (std::exception& ex) { return Status(StatusCode::UnknownError, "fail to delete by range: " + std::string(ex.what())); } @@ -401,4 +405,51 @@ ClientProxy::DropIndex(const std::string& table_name) const { } } +Status +ClientProxy::CreatePartition(const PartitionParam& partition_param) { + try { + ::milvus::grpc::PartitionParam grpc_partition_param; + grpc_partition_param.set_table_name(partition_param.table_name); + grpc_partition_param.set_partition_name(partition_param.partition_name); + grpc_partition_param.set_tag(partition_param.partition_tag); + Status status = client_ptr_->CreatePartition(grpc_partition_param); + return status; + } catch (std::exception& ex) { + return Status(StatusCode::UnknownError, "fail to create partition: " + std::string(ex.what())); + } +} + +Status +ClientProxy::ShowPartitions(const std::string& table_name, PartitionList& partition_array) const { + try { + ::milvus::grpc::TableName grpc_table_name; + grpc_table_name.set_table_name(table_name); + ::milvus::grpc::PartitionList grpc_partition_list; + Status status = client_ptr_->ShowPartitions(grpc_table_name, grpc_partition_list); + partition_array.resize(grpc_partition_list.partition_array_size()); + for (uint64_t i = 0; i < grpc_partition_list.partition_array_size(); ++i) { + partition_array[i].table_name = grpc_partition_list.partition_array(i).table_name(); + partition_array[i].partition_name = grpc_partition_list.partition_array(i).partition_name(); + partition_array[i].partition_tag = grpc_partition_list.partition_array(i).tag(); + } + return status; + } catch (std::exception& ex) { + return Status(StatusCode::UnknownError, "fail to show partitions: " + std::string(ex.what())); + } +} + +Status +ClientProxy::DropPartition(const PartitionParam& partition_param) { + try { + ::milvus::grpc::PartitionParam grpc_partition_param; + grpc_partition_param.set_table_name(partition_param.table_name); + grpc_partition_param.set_partition_name(partition_param.partition_name); + grpc_partition_param.set_tag(partition_param.partition_tag); + Status status = client_ptr_->DropPartition(grpc_partition_param); + return status; + } catch (std::exception& ex) { + return Status(StatusCode::UnknownError, "fail to drop partition: " + std::string(ex.what())); + } +} + } // namespace milvus diff --git a/core/src/sdk/grpc/ClientProxy.h b/core/src/sdk/grpc/ClientProxy.h index dbeacc1380..eb21e9c4b5 100644 --- a/core/src/sdk/grpc/ClientProxy.h +++ b/core/src/sdk/grpc/ClientProxy.h @@ -54,13 +54,13 @@ class ClientProxy : public Connection { CreateIndex(const IndexParam& index_param) override; Status - Insert(const std::string& table_name, const std::vector& record_array, + Insert(const std::string& table_name, const std::string& partition_tag, const std::vector& record_array, std::vector& id_array) override; Status - Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) override; + Search(const std::string& table_name, const std::vector& 
partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, int64_t topk, + int64_t nprobe, std::vector& topk_query_result_array) override; Status DescribeTable(const std::string& table_name, TableSchema& table_schema) override; @@ -84,7 +84,7 @@ class ClientProxy : public Connection { DumpTaskTables() const override; Status - DeleteByRange(Range& range, const std::string& table_name) override; + DeleteByDate(const std::string& table_name, const Range& range) override; Status PreloadTable(const std::string& table_name) const override; @@ -95,6 +95,15 @@ class ClientProxy : public Connection { Status DropIndex(const std::string& table_name) const override; + Status + CreatePartition(const PartitionParam& partition_param) override; + + Status + ShowPartitions(const std::string& table_name, PartitionList& partition_array) const override; + + Status + DropPartition(const PartitionParam& partition_param) override; + private: std::shared_ptr<::grpc::Channel> channel_; diff --git a/core/src/sdk/grpc/GrpcClient.cpp b/core/src/sdk/grpc/GrpcClient.cpp index 5c27c3b73f..29f378276d 100644 --- a/core/src/sdk/grpc/GrpcClient.cpp +++ b/core/src/sdk/grpc/GrpcClient.cpp @@ -259,13 +259,13 @@ GrpcClient::PreloadTable(milvus::grpc::TableName& table_name) { } Status -GrpcClient::DeleteByRange(grpc::DeleteByRangeParam& delete_by_range_param) { +GrpcClient::DeleteByDate(grpc::DeleteByDateParam& delete_by_range_param) { ClientContext context; ::milvus::grpc::Status response; - ::grpc::Status grpc_status = stub_->DeleteByRange(&context, delete_by_range_param, &response); + ::grpc::Status grpc_status = stub_->DeleteByDate(&context, delete_by_range_param, &response); if (!grpc_status.ok()) { - std::cerr << "DeleteByRange gRPC failed!" << std::endl; + std::cerr << "DeleteByDate gRPC failed!" << std::endl; return Status(StatusCode::RPCFailed, grpc_status.error_message()); } @@ -317,4 +317,57 @@ GrpcClient::DropIndex(grpc::TableName& table_name) { return Status::OK(); } +Status +GrpcClient::CreatePartition(const grpc::PartitionParam& partition_param) { + ClientContext context; + ::milvus::grpc::Status response; + ::grpc::Status grpc_status = stub_->CreatePartition(&context, partition_param, &response); + + if (!grpc_status.ok()) { + std::cerr << "CreatePartition gRPC failed!" << std::endl; + return Status(StatusCode::RPCFailed, grpc_status.error_message()); + } + + if (response.error_code() != grpc::SUCCESS) { + std::cerr << response.reason() << std::endl; + return Status(StatusCode::ServerFailed, response.reason()); + } + return Status::OK(); +} + +Status +GrpcClient::ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const { + ClientContext context; + ::grpc::Status grpc_status = stub_->ShowPartitions(&context, table_name, &partition_array); + + if (!grpc_status.ok()) { + std::cerr << "ShowPartitions gRPC failed!" << std::endl; + return Status(StatusCode::RPCFailed, grpc_status.error_message()); + } + + if (partition_array.status().error_code() != grpc::SUCCESS) { + std::cerr << partition_array.status().reason() << std::endl; + return Status(StatusCode::ServerFailed, partition_array.status().reason()); + } + return Status::OK(); +} + +Status +GrpcClient::DropPartition(const ::milvus::grpc::PartitionParam& partition_param) { + ClientContext context; + ::milvus::grpc::Status response; + ::grpc::Status grpc_status = stub_->DropPartition(&context, partition_param, &response); + + if (!grpc_status.ok()) { + std::cerr << "DropPartition gRPC failed!" 
<< std::endl; + return Status(StatusCode::RPCFailed, grpc_status.error_message()); + } + + if (response.error_code() != grpc::SUCCESS) { + std::cerr << response.reason() << std::endl; + return Status(StatusCode::ServerFailed, response.reason()); + } + return Status::OK(); +} + } // namespace milvus diff --git a/core/src/sdk/grpc/GrpcClient.h b/core/src/sdk/grpc/GrpcClient.h index d2e6ae5095..8599f8a53f 100644 --- a/core/src/sdk/grpc/GrpcClient.h +++ b/core/src/sdk/grpc/GrpcClient.h @@ -72,7 +72,7 @@ class GrpcClient { Cmd(std::string& result, const std::string& cmd); Status - DeleteByRange(grpc::DeleteByRangeParam& delete_by_range_param); + DeleteByDate(grpc::DeleteByDateParam& delete_by_range_param); Status PreloadTable(grpc::TableName& table_name); @@ -83,6 +83,15 @@ class GrpcClient { Status DropIndex(grpc::TableName& table_name); + Status + CreatePartition(const grpc::PartitionParam& partition_param); + + Status + ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const; + + Status + DropPartition(const ::milvus::grpc::PartitionParam& partition_param); + Status Disconnect(); diff --git a/core/src/sdk/include/MilvusApi.h b/core/src/sdk/include/MilvusApi.h index 68fe0e9d5c..8c92375649 100644 --- a/core/src/sdk/include/MilvusApi.h +++ b/core/src/sdk/include/MilvusApi.h @@ -64,7 +64,7 @@ struct TableSchema { /** * @brief Range information - * for DATE partition, the format is like: 'year-month-day' + * for DATE range, the format is like: 'year-month-day' */ struct Range { std::string start_value; ///< Range start @@ -102,6 +102,17 @@ struct IndexParam { int32_t nlist; }; +/** + * @brief partition parameters + */ +struct PartitionParam { + std::string table_name; + std::string partition_name; + std::string partition_tag; +}; + +using PartitionList = std::vector; + /** * @brief SDK main class */ @@ -195,7 +206,7 @@ class Connection { * * This method is used to create table * - * @param table_name, table name is going to be tested. + * @param table_name, target table's name. * * @return Indicate if table is cexist */ @@ -205,9 +216,9 @@ class Connection { /** * @brief Delete table method * - * This method is used to delete table. + * This method is used to delete table(and its partitions). * - * @param table_name, table name is going to be deleted. + * @param table_name, target table's name. * * @return Indicate if table is delete successfully. */ @@ -217,7 +228,7 @@ class Connection { /** * @brief Create index method * - * This method is used to create index for whole table + * This method is used to create index for whole table(and its partitions). * * @param IndexParam * table_name, table name is going to be create index. @@ -235,14 +246,15 @@ class Connection { * * This method is used to add vector array to table. * - * @param table_name, table_name is inserted. + * @param table_name, target table's name. + * @param partition_tag, target partition's tag, keep empty if no partition. * @param record_array, vector array is inserted. * @param id_array, after inserted every vector is given a id. * * @return Indicate if vector array are inserted successfully */ virtual Status - Insert(const std::string& table_name, const std::vector& record_array, + Insert(const std::string& table_name, const std::string& partition_tag, const std::vector& record_array, std::vector& id_array) = 0; /** @@ -250,7 +262,8 @@ class Connection { * * This method is used to query vector in table. * - * @param table_name, table_name is queried. 
+ * @param table_name, target table's name. + * @param partition_tags, target partition tags, keep empty to search the whole table. * @param query_record_array, all vector are going to be queried. * @param query_range_array, time ranges, if not specified, will search in whole table * @param topk, how many similarity vectors will be searched. * @@ -259,16 +272,16 @@ * @return Indicate if query is successful. */ virtual Status - Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array, - const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe, - std::vector<TopKQueryResult>& topk_query_result_array) = 0; + Search(const std::string& table_name, const std::vector<std::string>& partiton_tags, + const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array, int64_t topk, + int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) = 0; /** * @brief Show table description * * This method is used to show table information. * - * @param table_name, which table is show. + * @param table_name, target table's name. * @param table_schema, table_schema is given when operation is successful. * * @return Indicate if this operation is successful. @@ -281,8 +294,8 @@ * * This method is used to get table row count. * - * @param table_name, table's name. - * @param row_count, table total row count. + * @param table_name, target table's name. + * @param row_count, table total row count (including partitions). * * @return Indicate if this operation is successful. */ @@ -331,21 +344,28 @@ virtual std::string ServerStatus() const = 0; + /** + * @brief dump server tasks information + * + * This method is for internal use. + * + * @return Task tables information. + */ virtual std::string DumpTaskTables() const = 0; /** - * @brief delete tables by range + * @brief delete table data by date range * - * This method is used to delete tables by range. + * This method is used to delete table data by date range. * + * @param table_name, target table's name. * @param Range, table range to delete. - * @param table_name * * @return Indicate if this operation is successful. */ virtual Status - DeleteByRange(Range& range, const std::string& table_name) = 0; + DeleteByDate(const std::string& table_name, const Range& range) = 0; /** * @brief preload table @@ -364,9 +384,10 @@ * * This method is used to describe index * - * @param table_name + * @param table_name, target table's name. + * @param index_param, returned index information. * - * @return index informations and indicate if this operation is successful. + * @return Indicate if this operation is successful. */ virtual Status DescribeIndex(const std::string& table_name, IndexParam& index_param) const = 0; @@ -374,14 +395,53 @@ /** * @brief drop index * - * This method is used to drop index + * This method is used to drop index of table (and its partitions) * - * @param table_name + * @param table_name, target table's name. * * @return Indicate if this operation is successful. */ virtual Status DropIndex(const std::string& table_name) const = 0; + + /** + * @brief Create partition method + * + * This method is used to create table partition + * + * @param param, partition information to be created. + * + * @return Indicate if partition is created successfully + */ + virtual Status + CreatePartition(const PartitionParam& param) = 0; + + /** + * @brief Show partitions method + * + * This method is used to list all partitions of a table + * + * @param table_name, target table's name.
+ * @param partition_array, partition array of the table. + * + * @return Indicate if this operation is successful + */ + virtual Status + ShowPartitions(const std::string& table_name, PartitionList& partition_array) const = 0; + + /** + * @brief Delete partition method + * + * This method is used to delete table partition. + * + * @param param, target partition to be deleted. + * NOTE: specify param.partition_name to drop a partition by its name, + * or leave param.partition_name empty and specify both param.table_name and param.partition_tag + * + * @return Indicate if partition is deleted successfully. + */ + virtual Status + DropPartition(const PartitionParam& param) = 0; }; } // namespace milvus diff --git a/core/src/sdk/interface/ConnectionImpl.cpp b/core/src/sdk/interface/ConnectionImpl.cpp index 7034ce4a4d..04531b46eb 100644 --- a/core/src/sdk/interface/ConnectionImpl.cpp +++ b/core/src/sdk/interface/ConnectionImpl.cpp @@ -83,16 +83,16 @@ ConnectionImpl::CreateIndex(const IndexParam& index_param) { } Status -ConnectionImpl::Insert(const std::string& table_name, const std::vector<RowRecord>& record_array, - std::vector<int64_t>& id_array) { - return client_proxy_->Insert(table_name, record_array, id_array); +ConnectionImpl::Insert(const std::string& table_name, const std::string& partition_tag, + const std::vector<RowRecord>& record_array, std::vector<int64_t>& id_array) { + return client_proxy_->Insert(table_name, partition_tag, record_array, id_array); } Status -ConnectionImpl::Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array, - const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe, - std::vector<TopKQueryResult>& topk_query_result_array) { - return client_proxy_->Search(table_name, query_record_array, query_range_array, topk, nprobe, +ConnectionImpl::Search(const std::string& table_name, const std::vector<std::string>& partiton_tags, + const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array, + int64_t topk, int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) { + return client_proxy_->Search(table_name, partiton_tags, query_record_array, query_range_array, topk, nprobe, topk_query_result_array); } @@ -127,8 +127,8 @@ ConnectionImpl::DumpTaskTables() const { } Status -ConnectionImpl::DeleteByRange(Range& range, const std::string& table_name) { - return client_proxy_->DeleteByRange(range, table_name); +ConnectionImpl::DeleteByDate(const std::string& table_name, const Range& range) { + return client_proxy_->DeleteByDate(table_name, range); } Status @@ -146,4 +146,19 @@ ConnectionImpl::DropIndex(const std::string& table_name) const { return client_proxy_->DropIndex(table_name); } +Status +ConnectionImpl::CreatePartition(const PartitionParam& param) { + return client_proxy_->CreatePartition(param); +} + +Status +ConnectionImpl::ShowPartitions(const std::string& table_name, PartitionList& partition_array) const { + return client_proxy_->ShowPartitions(table_name, partition_array); +} + +Status +ConnectionImpl::DropPartition(const PartitionParam& param) { + return client_proxy_->DropPartition(param); +} + } // namespace milvus diff --git a/core/src/sdk/interface/ConnectionImpl.h b/core/src/sdk/interface/ConnectionImpl.h index 6bc3432bc4..199d22bf9d 100644 --- a/core/src/sdk/interface/ConnectionImpl.h +++ b/core/src/sdk/interface/ConnectionImpl.h @@ -56,13 +56,13 @@ class ConnectionImpl : public Connection { CreateIndex(const IndexParam& index_param) override; Status - Insert(const std::string& table_name, const std::vector<RowRecord>& record_array, + Insert(const std::string& table_name, const
std::string& partition_tag, const std::vector& record_array, std::vector& id_array) override; Status - Search(const std::string& table_name, const std::vector& query_record_array, - const std::vector& query_range_array, int64_t topk, int64_t nprobe, - std::vector& topk_query_result_array) override; + Search(const std::string& table_name, const std::vector& partiton_tags, + const std::vector& query_record_array, const std::vector& query_range_array, int64_t topk, + int64_t nprobe, std::vector& topk_query_result_array) override; Status DescribeTable(const std::string& table_name, TableSchema& table_schema) override; @@ -86,7 +86,7 @@ class ConnectionImpl : public Connection { DumpTaskTables() const override; Status - DeleteByRange(Range& range, const std::string& table_name) override; + DeleteByDate(const std::string& table_name, const Range& range) override; Status PreloadTable(const std::string& table_name) const override; @@ -97,6 +97,15 @@ class ConnectionImpl : public Connection { Status DropIndex(const std::string& table_name) const override; + Status + CreatePartition(const PartitionParam& param) override; + + Status + ShowPartitions(const std::string& table_name, PartitionList& partition_array) const override; + + Status + DropPartition(const PartitionParam& param) override; + private: std::shared_ptr client_proxy_; }; diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.cpp b/core/src/server/grpc_impl/GrpcRequestHandler.cpp index a9ee3d77d0..bb38349b4a 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.cpp +++ b/core/src/server/grpc_impl/GrpcRequestHandler.cpp @@ -150,9 +150,9 @@ GrpcRequestHandler::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Co } ::grpc::Status -GrpcRequestHandler::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, - ::milvus::grpc::Status* response) { - BaseTaskPtr task_ptr = DeleteByRangeTask::Create(request); +GrpcRequestHandler::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, + ::milvus::grpc::Status* response) { + BaseTaskPtr task_ptr = DeleteByDateTask::Create(request); ::milvus::grpc::Status grpc_status; GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status); response->set_error_code(grpc_status.error_code()); @@ -193,6 +193,36 @@ GrpcRequestHandler::DropIndex(::grpc::ServerContext* context, const ::milvus::gr return ::grpc::Status::OK; } +::grpc::Status +GrpcRequestHandler::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) { + BaseTaskPtr task_ptr = CreatePartitionTask::Create(request); + GrpcRequestScheduler::ExecTask(task_ptr, response); + return ::grpc::Status::OK; +} + +::grpc::Status +GrpcRequestHandler::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::PartitionList* response) { + BaseTaskPtr task_ptr = ShowPartitionsTask::Create(request->table_name(), response); + ::milvus::grpc::Status grpc_status; + GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status); + response->mutable_status()->set_reason(grpc_status.reason()); + response->mutable_status()->set_error_code(grpc_status.error_code()); + return ::grpc::Status::OK; +} + +::grpc::Status +GrpcRequestHandler::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) { + BaseTaskPtr task_ptr = DropPartitionTask::Create(request); + ::milvus::grpc::Status grpc_status; + 
GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status); + response->set_reason(grpc_status.reason()); + response->set_error_code(grpc_status.error_code()); + return ::grpc::Status::OK; +} + } // namespace grpc } // namespace server } // namespace milvus diff --git a/core/src/server/grpc_impl/GrpcRequestHandler.h b/core/src/server/grpc_impl/GrpcRequestHandler.h index 1a9b591154..11a7efbb98 100644 --- a/core/src/server/grpc_impl/GrpcRequestHandler.h +++ b/core/src/server/grpc_impl/GrpcRequestHandler.h @@ -28,296 +28,168 @@ namespace server { namespace grpc { class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service { public: - /** - * @brief Create table method - * - * This method is used to create table - * - * @param context, add context for every RPC - * @param request, used to provide table information to be created. - * @param response, used to get the status - * - * @return status - * - * @param request - * @param response - * @param context - */ + // * + // @brief This method is used to create table + // + // @param TableSchema, use to provide table information to be created. + // + // @return Status ::grpc::Status CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request, ::milvus::grpc::Status* response) override; - - /** - * @brief Test table existence method - * - * This method is used to test table existence. - * - * @param context, add context for every RPC - * @param request, table name is going to be tested. - * @param response, get the bool reply of hastable - * - * @return status - * - * @param request - * @param response - * @param context - */ + // * + // @brief This method is used to test table existence. + // + // @param TableName, table name is going to be tested. + // + // @return BoolReply ::grpc::Status HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::BoolReply* response) override; - - /** - * @brief Drop table method - * - * This method is used to drop table. - * - * @param context, add context for every RPC - * @param request, table name is going to be deleted. - * @param response, get the status of droptable - * - * @return status - * - * @param request - * @param response - * @param context - */ - ::grpc::Status - DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief build index by table method - * - * This method is used to build index by table in sync. - * - * @param context, add context for every RPC - * @param request, table name is going to be built index. - * @param response, get the status of buildindex - * - * @return status - * - * @param request - * @param response - * @param context - */ - ::grpc::Status - CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief Insert vector array to table - * - * This method is used to insert vector array to table. - * - * @param context, add context for every RPC - * @param request, table_name is inserted. - * @param response, vector array is inserted. - * - * @return status - * - * @param context - * @param request - * @param response - */ - ::grpc::Status - Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, - ::milvus::grpc::VectorIds* response) override; - - /** - * @brief Query vector - * - * This method is used to query vector in table. 
- * - * @param context, add context for every RPC - * @param request: - * table_name, table_name is queried. - * query_record_array, all vector are going to be queried. - * query_range_array, optional ranges for conditional search. If not specified, search whole table - * topk, how many similarity vectors will be searched. - * - * @param writer, write query result array. - * - * @return status - * - * @param context - * @param request - * @param writer - */ - ::grpc::Status - Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, - ::milvus::grpc::TopKQueryResultList* response) override; - - /** - * @brief Internal use query interface - * - * This method is used to query vector in specified files. - * - * @param context, add context for every RPC - * @param request: - * file_id_array, specified files id array, queried. - * query_record_array, all vector are going to be queried. - * query_range_array, optional ranges for conditional search. If not specified, search whole table - * topk, how many similarity vectors will be searched. - * - * @param writer, write query result array. - * - * @return status - * - * @param context - * @param request - * @param writer - */ - ::grpc::Status - SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, - ::milvus::grpc::TopKQueryResultList* response) override; - - /** - * @brief Get table schema - * - * This method is used to get table schema. - * - * @param context, add context for every RPC - * @param request, target table name. - * @param response, table schema - * - * @return status - * - * @param context - * @param request - * @param response - */ + // * + // @brief This method is used to get table schema. + // + // @param TableName, target table name. + // + // @return TableSchema ::grpc::Status DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) override; - - /** - * @brief Get table row count - * - * This method is used to get table row count. - * - * @param context, add context for every RPC - * @param request, target table name. - * @param response, table row count - * - * @return table row count - * - * @param request - * @param response - * @param context - */ + // * + // @brief This method is used to get table row count. + // + // @param TableName, target table name. + // + // @return TableRowCount ::grpc::Status CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableRowCount* response) override; - - /** - * @brief List all tables in database - * - * This method is used to list all tables. - * - * @param context, add context for every RPC - * @param request, show table command, usually not use - * @param writer, write tables to client - * - * @return status - * - * @param context - * @param request - * @param writer - */ + // * + // @brief This method is used to list all tables. + // + // @param Command, dummy parameter. + // + // @return TableNameList ::grpc::Status ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::TableNameList* response) override; - - /** - * @brief Give the server status - * - * - * This method is used to give the server status. - * @param context, add context for every RPC - * @param request, give server command - * @param response, server status - * - * @return status - * - * @param context - * @param request - * @param response - */ + // * + // @brief This method is used to delete table. + // + // @param TableName, table name is going to be deleted. + // + // @return Status ::grpc::Status - Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, - ::milvus::grpc::StringReply* response) override; - - /** - * @brief delete table by range - * - * This method is used to delete table by range. - * @param context, add context for every RPC - * @param request, table name and range - * @param response, status - * - * @return status - * - * @param context - * @param request - * @param response - */ + DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to build index by table in sync mode. + // + // @param IndexParam, index parameters. + // + // @return Status ::grpc::Status - DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief preload table - * - * This method is used to preload table. - * @param context, add context for every RPC - * @param request, table name - * @param response, status - * - * @return status - * - * @param context - * @param request - * @param response - */ - ::grpc::Status - PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, - ::milvus::grpc::Status* response) override; - - /** - * @brief Describe index - * - * This method is used to describe index. - * @param context, add context for every RPC - * @param request, table name - * @param response, index informations - * - * @return status - * - * @param context - * @param request - * @param response - */ + CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to describe index + // + // @param TableName, target table name. + // + // @return IndexParam ::grpc::Status DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::IndexParam* response) override; - - /** - * @brief Drop index - * - * This method is used to drop index. - * @param context, add context for every RPC - * @param request, table name - * @param response, status - * - * @return status - * - * @param context - * @param request - * @param response - */ + // * + // @brief This method is used to drop index + // + // @param TableName, target table name. + // + // @return Status ::grpc::Status DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to create partition + // + // @param PartitionParam, partition parameters. + // + // @return Status + ::grpc::Status + CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to show partition information + // + // @param TableName, target table name. + // + // @return PartitionList + ::grpc::Status + ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::PartitionList* response) override; + // * + // @brief This method is used to drop partition + // + // @param PartitionParam, target partition. + // + // @return Status + ::grpc::Status + DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to add vector array to table. + // + // @param InsertParam, insert parameters. + // + // @return VectorIds + ::grpc::Status + Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, + ::milvus::grpc::VectorIds* response) override; + // * + // @brief This method is used to query vector in table. + // + // @param SearchParam, search parameters. + // + // @return TopKQueryResultList + ::grpc::Status + Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, + ::milvus::grpc::TopKQueryResultList* response) override; + // * + // @brief This method is used to query vector in specified files. + // + // @param SearchInFilesParam, search in files parameters. + // + // @return TopKQueryResultList + ::grpc::Status + SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, + ::milvus::grpc::TopKQueryResultList* response) override; + // * + // @brief This method is used to give the server status. + // + // @param Command, command string + // + // @return StringReply + ::grpc::Status + Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, + ::milvus::grpc::StringReply* response) override; + // * + // @brief This method is used to delete vectors by date range + // + // @param DeleteByDateParam, delete parameters. + // + // @return Status + ::grpc::Status + DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, + ::milvus::grpc::Status* response) override; + // * + // @brief This method is used to preload table + // + // @param TableName, target table name.
+ // + // @return Status + ::grpc::Status + PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, + ::milvus::grpc::Status* response) override; }; } // namespace grpc diff --git a/core/src/server/grpc_impl/GrpcRequestTask.cpp b/core/src/server/grpc_impl/GrpcRequestTask.cpp index 0816d45750..960b826635 100644 --- a/core/src/server/grpc_impl/GrpcRequestTask.cpp +++ b/core/src/server/grpc_impl/GrpcRequestTask.cpp @@ -366,7 +366,7 @@ DropTableTask::OnExecute() { // step 3: Drop table std::vector dates; - status = DBWrapper::DB()->DeleteTable(table_name_, dates); + status = DBWrapper::DB()->DropTable(table_name_, dates); if (!status.ok()) { return status; } @@ -505,7 +505,8 @@ InsertTask::OnExecute() { memcpy(target_data, src_data, static_cast(sizeof(int64_t) * insert_param_->row_id_array_size())); } - status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), vec_count, vec_f.data(), vec_ids); + status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), insert_param_->partition_tag(), vec_count, + vec_f.data(), vec_ids); rc.ElapseFromBegin("add vectors to engine"); if (!status.ok()) { return status; @@ -637,7 +638,8 @@ SearchTask::OnExecute() { rc.RecordSection("prepare vector data"); // step 6: search vectors - engine::QueryResults results; + engine::ResultIds result_ids; + engine::ResultDistances result_distances; auto record_count = (uint64_t)search_param_->query_record_array().size(); #ifdef MILVUS_ENABLE_PROFILING @@ -647,11 +649,21 @@ SearchTask::OnExecute() { #endif if (file_id_array_.empty()) { - status = - DBWrapper::DB()->Query(table_name_, (size_t)top_k, record_count, nprobe, vec_f.data(), dates, results); + std::vector partition_tags; + for (size_t i = 0; i < search_param_->partition_tag_array_size(); i++) { + partition_tags.emplace_back(search_param_->partition_tag_array(i)); + } + + status = ValidationUtil::ValidatePartitionTags(partition_tags); + if (!status.ok()) { + return status; + } + + status = DBWrapper::DB()->Query(table_name_, partition_tags, (size_t)top_k, record_count, nprobe, + vec_f.data(), dates, result_ids, result_distances); } else { - status = DBWrapper::DB()->Query(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe, - vec_f.data(), dates, results); + status = DBWrapper::DB()->QueryByFileID(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe, + vec_f.data(), dates, result_ids, result_distances); } #ifdef MILVUS_ENABLE_PROFILING @@ -663,23 +675,20 @@ SearchTask::OnExecute() { return status; } - if (results.empty()) { + if (result_ids.empty()) { return Status::OK(); // empty table } - if (results.size() != record_count) { - std::string msg = "Search " + std::to_string(record_count) + " vectors but only return " + - std::to_string(results.size()) + " results"; - return Status(SERVER_ILLEGAL_SEARCH_RESULT, msg); - } + size_t result_k = result_ids.size() / record_count; // step 7: construct result array - for (auto& result : results) { + for (size_t i = 0; i < record_count; i++) { ::milvus::grpc::TopKQueryResult* topk_query_result = topk_result_list->add_topk_query_result(); - for (auto& pair : result) { + for (size_t j = 0; j < result_k; j++) { ::milvus::grpc::QueryResult* grpc_result = topk_query_result->add_query_result_arrays(); - grpc_result->set_id(pair.first); - grpc_result->set_distance(pair.second); + size_t idx = i * result_k + j; + grpc_result->set_id(result_ids[idx]); + grpc_result->set_distance(result_distances[idx]); } } @@ -759,22 +768,22 @@ CmdTask::OnExecute() { } 
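The SearchTask refactor above replaces the nested engine::QueryResults with two flat arrays: for nq query vectors the engine returns nq * k ids and distances laid out row-major, and the construction loop recovers query i's j-th hit at index i * result_k + j. A small sketch of that unpacking logic in isolation (plain std::vector types stand in here for engine::ResultIds and engine::ResultDistances):

#include <cstdint>
#include <iostream>
#include <vector>

// Flat top-k layout: entry (i, j) of an nq-by-k result matrix sits at i * k + j.
void PrintFlatTopK(const std::vector<int64_t>& ids, const std::vector<float>& distances, size_t nq) {
    size_t k = ids.size() / nq;  // same derivation as result_k in SearchTask::OnExecute
    for (size_t i = 0; i < nq; i++) {
        for (size_t j = 0; j < k; j++) {
            size_t idx = i * k + j;
            std::cout << "query " << i << " hit " << j << ": id=" << ids[idx]
                      << " distance=" << distances[idx] << std::endl;
        }
    }
}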
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -DeleteByRangeTask::DeleteByRangeTask(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param) +DeleteByDateTask::DeleteByDateTask(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param) : GrpcBaseTask(DDL_DML_TASK_GROUP), delete_by_range_param_(delete_by_range_param) { } BaseTaskPtr -DeleteByRangeTask::Create(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param) { +DeleteByDateTask::Create(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param) { if (delete_by_range_param == nullptr) { SERVER_LOG_ERROR << "grpc input is null!"; return nullptr; } - return std::shared_ptr(new DeleteByRangeTask(delete_by_range_param)); + return std::shared_ptr(new DeleteByDateTask(delete_by_range_param)); } Status -DeleteByRangeTask::OnExecute() { +DeleteByDateTask::OnExecute() { try { TimeRecorder rc("DeleteByRangeTask"); @@ -815,7 +824,7 @@ DeleteByRangeTask::OnExecute() { std::string fname = "/tmp/search_nq_" + this->delete_by_range_param_->table_name() + ".profiling"; ProfilerStart(fname.c_str()); #endif - status = DBWrapper::DB()->DeleteTable(table_name, dates); + status = DBWrapper::DB()->DropTable(table_name, dates); if (!status.ok()) { return status; } @@ -946,6 +955,119 @@ DropIndexTask::OnExecute() { return Status::OK(); } +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +CreatePartitionTask::CreatePartitionTask(const ::milvus::grpc::PartitionParam* partition_param) + : GrpcBaseTask(DDL_DML_TASK_GROUP), partition_param_(partition_param) { +} + +BaseTaskPtr +CreatePartitionTask::Create(const ::milvus::grpc::PartitionParam* partition_param) { + if (partition_param == nullptr) { + SERVER_LOG_ERROR << "grpc input is null!"; + return nullptr; + } + return std::shared_ptr(new CreatePartitionTask(partition_param)); +} + +Status +CreatePartitionTask::OnExecute() { + TimeRecorder rc("CreatePartitionTask"); + + try { + // step 1: check arguments + auto status = ValidationUtil::ValidateTableName(partition_param_->table_name()); + if (!status.ok()) { + return status; + } + + status = ValidationUtil::ValidateTableName(partition_param_->partition_name()); + if (!status.ok()) { + return status; + } + + status = ValidationUtil::ValidatePartitionTags({partition_param_->tag()}); + if (!status.ok()) { + return status; + } + + // step 2: create partition + status = DBWrapper::DB()->CreatePartition(partition_param_->table_name(), partition_param_->partition_name(), + partition_param_->tag()); + if (!status.ok()) { + // partition could exist + if (status.code() == DB_ALREADY_EXIST) { + return Status(SERVER_INVALID_TABLE_NAME, status.message()); + } + return status; + } + } catch (std::exception& ex) { + return Status(SERVER_UNEXPECTED_ERROR, ex.what()); + } + + rc.ElapseFromBegin("totally cost"); + + return Status::OK(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +ShowPartitionsTask::ShowPartitionsTask(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list) + : GrpcBaseTask(INFO_TASK_GROUP), table_name_(table_name), partition_list_(partition_list) { +} + +BaseTaskPtr +ShowPartitionsTask::Create(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list) { + return std::shared_ptr(new ShowPartitionsTask(table_name, partition_list)); +} + +Status 
+ShowPartitionsTask::OnExecute() {
+    std::vector<engine::meta::TableSchema> schema_array;
+    auto status = DBWrapper::DB()->ShowPartitions(table_name_, schema_array);
+    if (!status.ok()) {
+        return status;
+    }
+
+    for (auto& schema : schema_array) {
+        ::milvus::grpc::PartitionParam* param = partition_list_->add_partition_array();
+        param->set_table_name(schema.owner_table_);
+        param->set_partition_name(schema.table_id_);
+        param->set_tag(schema.partition_tag_);
+    }
+    return Status::OK();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+DropPartitionTask::DropPartitionTask(const ::milvus::grpc::PartitionParam* partition_param)
+    : GrpcBaseTask(DDL_DML_TASK_GROUP), partition_param_(partition_param) {
+}
+
+BaseTaskPtr
+DropPartitionTask::Create(const ::milvus::grpc::PartitionParam* partition_param) {
+    return std::shared_ptr<DropPartitionTask>(new DropPartitionTask(partition_param));
+}
+
+Status
+DropPartitionTask::OnExecute() {
+    if (!partition_param_->partition_name().empty()) {
+        auto status = ValidationUtil::ValidateTableName(partition_param_->partition_name());
+        if (!status.ok()) {
+            return status;
+        }
+        return DBWrapper::DB()->DropPartition(partition_param_->partition_name());
+    } else {
+        auto status = ValidationUtil::ValidateTableName(partition_param_->table_name());
+        if (!status.ok()) {
+            return status;
+        }
+
+        status = ValidationUtil::ValidatePartitionTags({partition_param_->tag()});
+        if (!status.ok()) {
+            return status;
+        }
+        return DBWrapper::DB()->DropPartitionByTag(partition_param_->table_name(), partition_param_->tag());
+    }
+}
+
 }  // namespace grpc
 }  // namespace server
 }  // namespace milvus
diff --git a/core/src/server/grpc_impl/GrpcRequestTask.h b/core/src/server/grpc_impl/GrpcRequestTask.h
index ad2828ebf3..6f8e66af43 100644
--- a/core/src/server/grpc_impl/GrpcRequestTask.h
+++ b/core/src/server/grpc_impl/GrpcRequestTask.h
@@ -203,19 +203,19 @@ class CmdTask : public GrpcBaseTask {
 };
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-class DeleteByRangeTask : public GrpcBaseTask {
+class DeleteByDateTask : public GrpcBaseTask {
  public:
     static BaseTaskPtr
-    Create(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param);
+    Create(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param);
 
  protected:
-    explicit DeleteByRangeTask(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param);
+    explicit DeleteByDateTask(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param);
 
     Status
     OnExecute() override;
 
  private:
-    const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param_;
+    const ::milvus::grpc::DeleteByDateParam* delete_by_range_param_;
 };
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -267,6 +267,55 @@ class DropIndexTask : public GrpcBaseTask {
     std::string table_name_;
 };
 
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+class CreatePartitionTask : public GrpcBaseTask {
+ public:
+    static BaseTaskPtr
+    Create(const ::milvus::grpc::PartitionParam* partition_param);
+
+ protected:
+    explicit CreatePartitionTask(const ::milvus::grpc::PartitionParam* partition_param);
+
+    Status
+    OnExecute() override;
+
+ private:
+    const ::milvus::grpc::PartitionParam* partition_param_;
+};
+
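A note on the drop-partition dispatch above: DropPartitionTask::OnExecute gives an explicit partition name precedence over the (table name, tag) pair. The sketch below mirrors that branch structure; DropByName and DropByTag are hypothetical stand-ins for DBWrapper::DB()->DropPartition and DropPartitionByTag, used only to make the dispatch testable on its own:

    #include <iostream>
    #include <string>

    // Hypothetical stand-ins for the DB-layer calls; illustration only.
    bool DropByName(const std::string& name) {
        std::cout << "drop partition by name: " << name << "\n";
        return true;
    }
    bool DropByTag(const std::string& table, const std::string& tag) {
        std::cout << "drop partition of " << table << " by tag: " << tag << "\n";
        return true;
    }

    // An explicit partition name wins; otherwise (table, tag) identifies it.
    bool DropPartitionDispatch(const std::string& table_name,
                               const std::string& partition_name,
                               const std::string& tag) {
        if (!partition_name.empty()) {
            return DropByName(partition_name);
        }
        return DropByTag(table_name, tag);
    }

    int main() {
        DropPartitionDispatch("tbl", "tbl_0", "");  // resolved by name
        DropPartitionDispatch("tbl", "", "0");      // resolved by table + tag
        return 0;
    }
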
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class ShowPartitionsTask : public GrpcBaseTask { + public: + static BaseTaskPtr + Create(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list); + + protected: + ShowPartitionsTask(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list); + + Status + OnExecute() override; + + private: + std::string table_name_; + ::milvus::grpc::PartitionList* partition_list_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class DropPartitionTask : public GrpcBaseTask { + public: + static BaseTaskPtr + Create(const ::milvus::grpc::PartitionParam* partition_param); + + protected: + explicit DropPartitionTask(const ::milvus::grpc::PartitionParam* partition_param); + + Status + OnExecute() override; + + private: + const ::milvus::grpc::PartitionParam* partition_param_; +}; + } // namespace grpc } // namespace server } // namespace milvus diff --git a/core/src/utils/StringHelpFunctions.cpp b/core/src/utils/StringHelpFunctions.cpp index 230cc1a0ff..2436fb7ad9 100644 --- a/core/src/utils/StringHelpFunctions.cpp +++ b/core/src/utils/StringHelpFunctions.cpp @@ -17,6 +17,7 @@ #include "utils/StringHelpFunctions.h" +#include #include namespace milvus { @@ -122,5 +123,22 @@ StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::strin return Status::OK(); } +bool +StringHelpFunctions::IsRegexMatch(const std::string& target_str, const std::string& pattern_str) { + // if target_str equals pattern_str, return true + if (target_str == pattern_str) { + return true; + } + + // regex match + std::regex pattern(pattern_str); + std::smatch results; + if (std::regex_search(target_str, results, pattern)) { + return true; + } else { + return false; + } +} + } // namespace server } // namespace milvus diff --git a/core/src/utils/StringHelpFunctions.h b/core/src/utils/StringHelpFunctions.h index cb355332f1..81d02d1383 100644 --- a/core/src/utils/StringHelpFunctions.h +++ b/core/src/utils/StringHelpFunctions.h @@ -56,6 +56,11 @@ class StringHelpFunctions { static Status SplitStringByQuote(const std::string& str, const std::string& delimeter, const std::string& quote, std::vector& result); + + // std regex match function + // regex grammar reference: http://www.cplusplus.com/reference/regex/ECMAScript/ + static bool + IsRegexMatch(const std::string& target_str, const std::string& pattern); }; } // namespace server diff --git a/core/src/utils/ValidationUtil.cpp b/core/src/utils/ValidationUtil.cpp index dc2604813f..ec696ff3e0 100644 --- a/core/src/utils/ValidationUtil.cpp +++ b/core/src/utils/ValidationUtil.cpp @@ -168,6 +168,19 @@ ValidationUtil::ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSc return Status::OK(); } +Status +ValidationUtil::ValidatePartitionTags(const std::vector& partition_tags) { + for (auto& tag : partition_tags) { + if (tag.empty()) { + std::string msg = "Invalid partition tag: " + tag + ". 
" + "Partition tag should not be empty."; + SERVER_LOG_ERROR << msg; + return Status(SERVER_INVALID_NPROBE, msg); + } + } + + return Status::OK(); +} + Status ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) { #ifdef MILVUS_GPU_VERSION diff --git a/core/src/utils/ValidationUtil.h b/core/src/utils/ValidationUtil.h index 7b24c93fb5..01801e295a 100644 --- a/core/src/utils/ValidationUtil.h +++ b/core/src/utils/ValidationUtil.h @@ -21,6 +21,7 @@ #include "utils/Status.h" #include +#include namespace milvus { namespace server { @@ -54,6 +55,9 @@ class ValidationUtil { static Status ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSchema& table_schema); + static Status + ValidatePartitionTags(const std::vector& partition_tags); + static Status ValidateGpuIndex(uint32_t gpu_index); diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt index 7bcc21f7ee..e485bd729a 100644 --- a/core/unittest/CMakeLists.txt +++ b/core/unittest/CMakeLists.txt @@ -77,6 +77,7 @@ set(helper_files ${MILVUS_ENGINE_SRC}/utils/CommonUtil.cpp ${MILVUS_ENGINE_SRC}/utils/TimeRecorder.cpp ${MILVUS_ENGINE_SRC}/utils/Status.cpp + ${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp ${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc ) diff --git a/core/unittest/db/test_db.cpp b/core/unittest/db/test_db.cpp index 42dc8dec82..d8614dd5d1 100644 --- a/core/unittest/db/test_db.cpp +++ b/core/unittest/db/test_db.cpp @@ -171,7 +171,8 @@ TEST_F(DBTest, DB_TEST) { BuildVectors(qb, qxb); std::thread search([&]() { - milvus::engine::QueryResults results; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -186,17 +187,19 @@ TEST_F(DBTest, DB_TEST) { prev_count = count; START_TIMER; - stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results); + + std::vector tags; + stat = db_->Query(TABLE_NAME, tags, k, qb, 10, qxb.data(), result_ids, result_distances); ss << "Search " << j << " With Size " << count / milvus::engine::M << " M"; STOP_TIMER(ss.str()); ASSERT_TRUE(stat.ok()); - for (auto k = 0; k < qb; ++k) { - ASSERT_EQ(results[k][0].first, target_ids[k]); + for (auto i = 0; i < qb; ++i) { + ASSERT_EQ(result_ids[i*k], target_ids[i]); ss.str(""); - ss << "Result [" << k << "]:"; - for (auto result : results[k]) { - ss << result.first << " "; + ss << "Result [" << i << "]:"; + for (auto t = 0; t < k; t++) { + ss << result_ids[i * k + t] << " "; } /* LOG(DEBUG) << ss.str(); */ } @@ -209,10 +212,10 @@ TEST_F(DBTest, DB_TEST) { for (auto i = 0; i < loop; ++i) { if (i == 40) { - db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); + db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); } else { - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); } std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -270,7 +273,7 @@ TEST_F(DBTest, SEARCH_TEST) { // insert data const int batch_size = 100; for (int j = 0; j < nb / batch_size; ++j) { - stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); + stat = db_->InsertVectors(TABLE_NAME, "", batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); if (j == 200) { sleep(1); } @@ -282,16 +285,19 @@ TEST_F(DBTest, SEARCH_TEST) { db_->CreateIndex(TABLE_NAME, index); // wait until build index finish { - milvus::engine::QueryResults results; - 
stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results); + std::vector tags; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, k, nq, 10, xq.data(), result_ids, result_distances); ASSERT_TRUE(stat.ok()); } { // search by specify index file milvus::engine::meta::DatesT dates; std::vector file_ids = {"1", "2", "3", "4", "5", "6"}; - milvus::engine::QueryResults results; - stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results); + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->QueryByFileID(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, result_ids, result_distances); ASSERT_TRUE(stat.ok()); } @@ -340,7 +346,7 @@ TEST_F(DBTest, PRELOADTABLE_TEST) { int loop = 5; for (auto i = 0; i < loop; ++i) { milvus::engine::IDNumbers vector_ids; - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids.size(), nb); } @@ -370,7 +376,7 @@ TEST_F(DBTest, SHUTDOWN_TEST) { ASSERT_FALSE(stat.ok()); milvus::engine::IDNumbers ids; - stat = db_->InsertVectors(table_info.table_id_, 0, nullptr, ids); + stat = db_->InsertVectors(table_info.table_id_, "", 0, nullptr, ids); ASSERT_FALSE(stat.ok()); stat = db_->PreloadTable(table_info.table_id_); @@ -387,15 +393,17 @@ TEST_F(DBTest, SHUTDOWN_TEST) { stat = db_->DescribeIndex(table_info.table_id_, index); ASSERT_FALSE(stat.ok()); + std::vector tags; milvus::engine::meta::DatesT dates; - milvus::engine::QueryResults results; - stat = db_->Query(table_info.table_id_, 1, 1, 1, nullptr, dates, results); + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(table_info.table_id_, tags, 1, 1, 1, nullptr, dates, result_ids, result_distances); ASSERT_FALSE(stat.ok()); std::vector file_ids; - stat = db_->Query(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, results); + stat = db_->QueryByFileID(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, result_ids, result_distances); ASSERT_FALSE(stat.ok()); - stat = db_->DeleteTable(table_info.table_id_, dates); + stat = db_->DropTable(table_info.table_id_, dates); ASSERT_FALSE(stat.ok()); } @@ -408,7 +416,7 @@ TEST_F(DBTest, INDEX_TEST) { BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids.size(), nb); milvus::engine::TableIndex index; @@ -438,6 +446,106 @@ TEST_F(DBTest, INDEX_TEST) { ASSERT_TRUE(stat.ok()); } +TEST_F(DBTest, PARTITION_TEST) { + milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + auto stat = db_->CreateTable(table_info); + ASSERT_TRUE(stat.ok()); + + // create partition and insert data + const int64_t PARTITION_COUNT = 5; + const int64_t INSERT_BATCH = 2000; + std::string table_name = TABLE_NAME; + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + std::string partition_tag = std::to_string(i); + std::string partition_name = table_name + "_" + partition_tag; + stat = db_->CreatePartition(table_name, partition_name, partition_tag); + ASSERT_TRUE(stat.ok()); + + + std::vector xb; + BuildVectors(INSERT_BATCH, xb); + + milvus::engine::IDNumbers vector_ids; + vector_ids.resize(INSERT_BATCH); + for (int64_t k = 0; k < INSERT_BATCH; k++) { + vector_ids[k] = i*INSERT_BATCH + k; + } + + db_->InsertVectors(table_name, partition_tag, 
INSERT_BATCH, xb.data(), vector_ids); + ASSERT_EQ(vector_ids.size(), INSERT_BATCH); + } + + //duplicated partition is not allowed + stat = db_->CreatePartition(table_name, "", "0"); + ASSERT_FALSE(stat.ok()); + + std::vector partiton_schema_array; + stat = db_->ShowPartitions(table_name, partiton_schema_array); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(partiton_schema_array.size(), PARTITION_COUNT); + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + ASSERT_EQ(partiton_schema_array[i].table_id_, table_name + "_" + std::to_string(i)); + } + + { // build index + milvus::engine::TableIndex index; + index.engine_type_ = (int) milvus::engine::EngineType::FAISS_IVFFLAT; + index.metric_type_ = (int) milvus::engine::MetricType::L2; + stat = db_->CreateIndex(table_info.table_id_, index); + ASSERT_TRUE(stat.ok()); + + uint64_t row_count = 0; + stat = db_->GetTableRowCount(TABLE_NAME, row_count); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(row_count, INSERT_BATCH*PARTITION_COUNT); + } + + { // search + const int64_t nq = 5; + const int64_t topk = 10; + const int64_t nprobe = 10; + std::vector xq; + BuildVectors(nq, xq); + + // specify partition tags + std::vector tags = {"0", std::to_string(PARTITION_COUNT - 1)}; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in whole table + tags.clear(); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in all partitions(tag regex match) + tags.push_back("\\d"); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + } + + stat = db_->DropPartition(table_name + "_0"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropPartitionByTag(table_name, "1"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropIndex(table_name); + ASSERT_TRUE(stat.ok()); + + milvus::engine::meta::DatesT dates; + stat = db_->DropTable(table_name, dates); + ASSERT_TRUE(stat.ok()); +} + TEST_F(DBTest2, ARHIVE_DISK_CHECK) { milvus::engine::meta::TableSchema table_info = BuildTableSchema(); auto stat = db_->CreateTable(table_info); @@ -470,7 +578,7 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) { int loop = INSERT_LOOP; for (auto i = 0; i < loop; ++i) { milvus::engine::IDNumbers vector_ids; - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -502,12 +610,12 @@ TEST_F(DBTest2, DELETE_TEST) { BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + stat = db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); milvus::engine::TableIndex index; stat = db_->CreateIndex(TABLE_NAME, index); std::vector dates; - stat = db_->DeleteTable(TABLE_NAME, dates); + stat = db_->DropTable(TABLE_NAME, dates); std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_TRUE(stat.ok()); @@ -537,7 +645,7 @@ TEST_F(DBTest2, DELETE_BY_RANGE_TEST) { BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + 
stat = db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); milvus::engine::TableIndex index; stat = db_->CreateIndex(TABLE_NAME, index); @@ -549,7 +657,7 @@ TEST_F(DBTest2, DELETE_BY_RANGE_TEST) { std::string end_value = CurrentTmDate(1); ConvertTimeRangeToDBDates(start_value, end_value, dates); - stat = db_->DeleteTable(TABLE_NAME, dates); + stat = db_->DropTable(TABLE_NAME, dates); ASSERT_TRUE(stat.ok()); uint64_t row_count = 0; diff --git a/core/unittest/db/test_db_mysql.cpp b/core/unittest/db/test_db_mysql.cpp index e0a84662a4..f828431838 100644 --- a/core/unittest/db/test_db_mysql.cpp +++ b/core/unittest/db/test_db_mysql.cpp @@ -77,11 +77,12 @@ TEST_F(MySqlDBTest, DB_TEST) { std::vector qxb; BuildVectors(qb, qxb); - db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); + db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); std::thread search([&]() { - milvus::engine::QueryResults results; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(5)); @@ -96,25 +97,26 @@ TEST_F(MySqlDBTest, DB_TEST) { prev_count = count; START_TIMER; - stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results); + std::vector tags; + stat = db_->Query(TABLE_NAME, tags, k, qb, 10, qxb.data(), result_ids, result_distances); ss << "Search " << j << " With Size " << count / milvus::engine::M << " M"; STOP_TIMER(ss.str()); ASSERT_TRUE(stat.ok()); - for (auto k = 0; k < qb; ++k) { - // std::cout << results[k][0].first << " " << target_ids[k] << std::endl; - // ASSERT_EQ(results[k][0].first, target_ids[k]); + for (auto i = 0; i < qb; ++i) { +// std::cout << results[k][0].first << " " << target_ids[k] << std::endl; +// ASSERT_EQ(results[k][0].first, target_ids[k]); bool exists = false; - for (auto& result : results[k]) { - if (result.first == target_ids[k]) { + for (auto t = 0; t < k; t++) { + if (result_ids[i * k + t] == target_ids[i]) { exists = true; } } ASSERT_TRUE(exists); ss.str(""); - ss << "Result [" << k << "]:"; - for (auto result : results[k]) { - ss << result.first << " "; + ss << "Result [" << i << "]:"; + for (auto t = 0; t < k; t++) { + ss << result_ids[i * k + t] << " "; } /* LOG(DEBUG) << ss.str(); */ } @@ -128,13 +130,13 @@ TEST_F(MySqlDBTest, DB_TEST) { int loop = INSERT_LOOP; for (auto i = 0; i < loop; ++i) { - // if (i==10) { - // db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids); - // ASSERT_EQ(target_ids.size(), qb); - // } else { - // db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); - // } - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); +// if (i==10) { +// db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids); +// ASSERT_EQ(target_ids.size(), qb); +// } else { +// db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); +// } + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -181,17 +183,17 @@ TEST_F(MySqlDBTest, SEARCH_TEST) { // insert data const int batch_size = 100; for (int j = 0; j < nb / batch_size; ++j) { - stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); - if (j == 200) { - sleep(1); - } + stat = db_->InsertVectors(TABLE_NAME, "", batch_size, xb.data() + batch_size * j * TABLE_DIM, ids); + if (j == 200) { sleep(1); } ASSERT_TRUE(stat.ok()); } sleep(2); // wait until build index finish - milvus::engine::QueryResults results; - stat = 
db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results); + std::vector tags; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, k, nq, 10, xq.data(), result_ids, result_distances); ASSERT_TRUE(stat.ok()); } @@ -229,7 +231,7 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) { int loop = INSERT_LOOP; for (auto i = 0; i < loop; ++i) { - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -265,17 +267,117 @@ TEST_F(MySqlDBTest, DELETE_TEST) { int loop = 20; for (auto i = 0; i < loop; ++i) { - db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids); + db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids); std::this_thread::sleep_for(std::chrono::microseconds(1)); } - // std::vector dates; - // stat = db_->DeleteTable(TABLE_NAME, dates); - //// std::cout << "5 sec start" << std::endl; - // std::this_thread::sleep_for(std::chrono::seconds(5)); - //// std::cout << "5 sec finish" << std::endl; - // ASSERT_TRUE(stat.ok()); - // - // db_->HasTable(TABLE_NAME, has_table); - // ASSERT_FALSE(has_table); +// std::vector dates; +// stat = db_->DropTable(TABLE_NAME, dates); +//// std::cout << "5 sec start" << std::endl; +// std::this_thread::sleep_for(std::chrono::seconds(5)); +//// std::cout << "5 sec finish" << std::endl; +// ASSERT_TRUE(stat.ok()); +// +// db_->HasTable(TABLE_NAME, has_table); +// ASSERT_FALSE(has_table); +} + +TEST_F(MySqlDBTest, PARTITION_TEST) { + milvus::engine::meta::TableSchema table_info = BuildTableSchema(); + auto stat = db_->CreateTable(table_info); + ASSERT_TRUE(stat.ok()); + + // create partition and insert data + const int64_t PARTITION_COUNT = 5; + const int64_t INSERT_BATCH = 2000; + std::string table_name = TABLE_NAME; + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + std::string partition_tag = std::to_string(i); + std::string partition_name = table_name + "_" + partition_tag; + stat = db_->CreatePartition(table_name, partition_name, partition_tag); + ASSERT_TRUE(stat.ok()); + + + std::vector xb; + BuildVectors(INSERT_BATCH, xb); + + milvus::engine::IDNumbers vector_ids; + vector_ids.resize(INSERT_BATCH); + for (int64_t k = 0; k < INSERT_BATCH; k++) { + vector_ids[k] = i*INSERT_BATCH + k; + } + + db_->InsertVectors(table_name, partition_tag, INSERT_BATCH, xb.data(), vector_ids); + ASSERT_EQ(vector_ids.size(), INSERT_BATCH); + } + + //duplicated partition is not allowed + stat = db_->CreatePartition(table_name, "", "0"); + ASSERT_FALSE(stat.ok()); + + std::vector partiton_schema_array; + stat = db_->ShowPartitions(table_name, partiton_schema_array); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(partiton_schema_array.size(), PARTITION_COUNT); + for (int64_t i = 0; i < PARTITION_COUNT; i++) { + ASSERT_EQ(partiton_schema_array[i].table_id_, table_name + "_" + std::to_string(i)); + } + + { // build index + milvus::engine::TableIndex index; + index.engine_type_ = (int) milvus::engine::EngineType::FAISS_IVFFLAT; + index.metric_type_ = (int) milvus::engine::MetricType::L2; + stat = db_->CreateIndex(table_info.table_id_, index); + ASSERT_TRUE(stat.ok()); + + uint64_t row_count = 0; + stat = db_->GetTableRowCount(TABLE_NAME, row_count); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(row_count, INSERT_BATCH*PARTITION_COUNT); + } + + { // search + const int64_t nq = 5; + const int64_t topk = 10; + const int64_t nprobe = 10; + std::vector xq; + BuildVectors(nq, xq); + + // specify 
partition tags + std::vector tags = {"0", std::to_string(PARTITION_COUNT - 1)}; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in whole table + tags.clear(); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + + // search in all partitions(tag regex match) + tags.push_back("\\d"); + result_ids.clear(); + result_distances.clear(); + stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances); + ASSERT_TRUE(stat.ok()); + ASSERT_EQ(result_ids.size()/topk, nq); + } + + stat = db_->DropPartition(table_name + "_0"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropPartitionByTag(table_name, "1"); + ASSERT_TRUE(stat.ok()); + + stat = db_->DropIndex(table_name); + ASSERT_TRUE(stat.ok()); + + milvus::engine::meta::DatesT dates; + stat = db_->DropTable(table_name, dates); + ASSERT_TRUE(stat.ok()); } diff --git a/core/unittest/db/test_mem.cpp b/core/unittest/db/test_mem.cpp index f3c635db49..7139553feb 100644 --- a/core/unittest/db/test_mem.cpp +++ b/core/unittest/db/test_mem.cpp @@ -231,7 +231,7 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) { vector_ids.push_back(i); } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_TRUE(stat.ok()); std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk @@ -254,10 +254,13 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) { int topk = 10, nprobe = 10; for (auto& pair : search_vectors) { auto& search = pair.second; - milvus::engine::QueryResults results; - stat = db_->Query(GetTableName(), topk, 1, nprobe, search.data(), results); - ASSERT_EQ(results[0][0].first, pair.first); - ASSERT_LT(results[0][0].second, 1e-4); + + std::vector tags; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; + stat = db_->Query(GetTableName(), tags, topk, 1, nprobe, search.data(), result_ids, result_distances); + ASSERT_EQ(result_ids[0], pair.first); + ASSERT_LT(result_distances[0], 1e-4); } } @@ -279,7 +282,7 @@ TEST_F(MemManagerTest2, INSERT_TEST) { std::vector xb; BuildVectors(nb, xb); milvus::engine::IDNumbers vector_ids; - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_TRUE(stat.ok()); } auto end_time = METRICS_NOW_TIME; @@ -309,7 +312,8 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { BuildVectors(qb, qxb); std::thread search([&]() { - milvus::engine::QueryResults results; + milvus::engine::ResultIds result_ids; + milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -324,17 +328,19 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { prev_count = count; START_TIMER; - stat = db_->Query(GetTableName(), k, qb, 10, qxb.data(), results); + + std::vector tags; + stat = db_->Query(GetTableName(), tags, k, qb, 10, qxb.data(), result_ids, result_distances); ss << "Search " << j << " With Size " << count / milvus::engine::M << " M"; STOP_TIMER(ss.str()); ASSERT_TRUE(stat.ok()); - for (auto k = 0; k < qb; ++k) { - 
ASSERT_EQ(results[k][0].first, target_ids[k]); + for (auto i = 0; i < qb; ++i) { + ASSERT_EQ(result_ids[i * k], target_ids[i]); ss.str(""); - ss << "Result [" << k << "]:"; - for (auto result : results[k]) { - ss << result.first << " "; + ss << "Result [" << i << "]:"; + for (auto t = 0; t < k; t++) { + ss << result_ids[i * k + t] << " "; } /* LOG(DEBUG) << ss.str(); */ } @@ -347,10 +353,10 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) { for (auto i = 0; i < loop; ++i) { if (i == 0) { - db_->InsertVectors(GetTableName(), qb, qxb.data(), target_ids); + db_->InsertVectors(GetTableName(), "", qb, qxb.data(), target_ids); ASSERT_EQ(target_ids.size(), qb); } else { - db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); } std::this_thread::sleep_for(std::chrono::microseconds(1)); } @@ -379,7 +385,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { vector_ids[i] = i; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids[0], 0); ASSERT_TRUE(stat.ok()); @@ -391,7 +397,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { for (auto i = 0; i < nb; i++) { vector_ids[i] = i + nb; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids[0], nb); ASSERT_TRUE(stat.ok()); @@ -403,7 +409,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { for (auto i = 0; i < nb; i++) { vector_ids[i] = i + nb / 2; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_EQ(vector_ids[0], nb / 2); ASSERT_TRUE(stat.ok()); @@ -411,7 +417,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { xb.clear(); BuildVectors(nb, xb); vector_ids.clear(); - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); ASSERT_TRUE(stat.ok()); nb = 100; @@ -422,7 +428,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) { for (auto i = 0; i < nb; i++) { vector_ids[i] = i + nb; } - stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids); + stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids); for (auto i = 0; i < nb; i++) { ASSERT_EQ(vector_ids[i], i + nb); } diff --git a/core/unittest/db/test_meta.cpp b/core/unittest/db/test_meta.cpp index 1311f93141..097f004bd1 100644 --- a/core/unittest/db/test_meta.cpp +++ b/core/unittest/db/test_meta.cpp @@ -84,14 +84,14 @@ TEST_F(MetaTest, TABLE_FILE_TEST) { milvus::engine::meta::DatesT dates; dates.push_back(milvus::engine::utils::GetDate()); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); dates.clear(); for (auto i = 2; i < 10; ++i) { dates.push_back(milvus::engine::utils::GetDateWithDelta(-1 * i)); } - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); table_file.date_ = milvus::engine::utils::GetDateWithDelta(-2); @@ -102,7 +102,7 @@ TEST_F(MetaTest, TABLE_FILE_TEST) { dates.clear(); dates.push_back(table_file.date_); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); 
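The meta-layer rename exercised above (DropPartitionsByDates becoming DropDataByDate) frees the term "partition" for the new user-visible partitions; date-bucketed cleanup is now plain data dropping. A minimal sketch of the call pattern, assuming DatesT is a vector of integer date codes — the real values come from engine::utils::GetDate()/GetDateWithDelta(), which the placeholder below only imitates:

    #include <vector>

    // Assumed shape of meta::DatesT; the placeholder date encoding below is
    // illustrative only.
    using DateT = int;
    using DatesT = std::vector<DateT>;

    DateT FakeDateWithDelta(int delta_days) {
        return 20191107 + delta_days;  // stands in for utils::GetDateWithDelta
    }

    // Collect the dates whose data should be dropped, mirroring the loop in
    // TABLE_FILE_TEST: everything from 2 to 9 days back.
    DatesT CollectStaleDates() {
        DatesT dates;
        for (int i = 2; i < 10; ++i) {
            dates.push_back(FakeDateWithDelta(-1 * i));
        }
        return dates;
    }

    int main() {
        DatesT dates = CollectStaleDates();
        // A real caller would now do: impl_->DropDataByDate(table_id, dates);
        return dates.size() == 8 ? 0 : 1;
    }
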
std::vector ids = {table_file.id_}; @@ -332,7 +332,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) { status = impl_->CleanUp(); ASSERT_TRUE(status.ok()); - status = impl_->DeleteTable(table_id); + status = impl_->DropTable(table_id); ASSERT_TRUE(status.ok()); status = impl_->CleanUpFilesWithTTL(1UL); diff --git a/core/unittest/db/test_meta_mysql.cpp b/core/unittest/db/test_meta_mysql.cpp index 2dbd26486d..b9a82c0748 100644 --- a/core/unittest/db/test_meta_mysql.cpp +++ b/core/unittest/db/test_meta_mysql.cpp @@ -74,7 +74,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { milvus::engine::meta::DatesT dates; dates.push_back(milvus::engine::utils::GetDate()); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); uint64_t cnt = 0; @@ -95,7 +95,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { for (auto i = 2; i < 10; ++i) { dates.push_back(milvus::engine::utils::GetDateWithDelta(-1 * i)); } - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); table_file.date_ = milvus::engine::utils::GetDateWithDelta(-2); @@ -106,7 +106,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) { dates.clear(); dates.push_back(table_file.date_); - status = impl_->DropPartitionsByDates(table_file.table_id_, dates); + status = impl_->DropDataByDate(table_file.table_id_, dates); ASSERT_TRUE(status.ok()); std::vector ids = {table_file.id_}; @@ -346,7 +346,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) { status = impl_->DeleteTableFiles(table_id); ASSERT_TRUE(status.ok()); - status = impl_->DeleteTable(table_id); + status = impl_->DropTable(table_id); ASSERT_TRUE(status.ok()); status = impl_->CleanUpFilesWithTTL(0UL); diff --git a/core/unittest/db/test_search.cpp b/core/unittest/db/test_search.cpp index 402ba2cd6b..1d1d9a677a 100644 --- a/core/unittest/db/test_search.cpp +++ b/core/unittest/db/test_search.cpp @@ -19,73 +19,97 @@ #include #include +#include "scheduler/job/SearchJob.h" #include "scheduler/task/SearchTask.h" -#include "utils/ThreadPool.h" #include "utils/TimeRecorder.h" +#include "utils/ThreadPool.h" namespace { namespace ms = milvus::scheduler; void -BuildResult(std::vector& output_ids, std::vector& output_distance, uint64_t input_k, uint64_t topk, - uint64_t nq, bool ascending) { +BuildResult(ms::ResultIds& output_ids, + ms::ResultDistances & output_distances, + size_t input_k, + size_t topk, + size_t nq, + bool ascending) { output_ids.clear(); output_ids.resize(nq * topk); - output_distance.clear(); - output_distance.resize(nq * topk); + output_distances.clear(); + output_distances.resize(nq * topk); - for (uint64_t i = 0; i < nq; i++) { - // insert valid items - for (uint64_t j = 0; j < input_k; j++) { + for (size_t i = 0; i < nq; i++) { + //insert valid items + for (size_t j = 0; j < input_k; j++) { output_ids[i * topk + j] = (int64_t)(drand48() * 100000); - output_distance[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48()); + output_distances[i * topk + j] = ascending ? 
(j + drand48()) : ((input_k - j) + drand48()); } - // insert invalid items - for (uint64_t j = input_k; j < topk; j++) { + //insert invalid items + for (size_t j = input_k; j < topk; j++) { output_ids[i * topk + j] = -1; - output_distance[i * topk + j] = -1.0; + output_distances[i * topk + j] = -1.0; } } } void -CopyResult(std::vector& output_ids, std::vector& output_distance, uint64_t output_topk, - std::vector& input_ids, std::vector& input_distance, uint64_t input_topk, uint64_t nq) { +CopyResult(ms::ResultIds& output_ids, + ms::ResultDistances& output_distances, + size_t output_topk, + ms::ResultIds& input_ids, + ms::ResultDistances& input_distances, + size_t input_topk, + size_t nq) { ASSERT_TRUE(input_ids.size() >= nq * input_topk); - ASSERT_TRUE(input_distance.size() >= nq * input_topk); + ASSERT_TRUE(input_distances.size() >= nq * input_topk); ASSERT_TRUE(output_topk <= input_topk); output_ids.clear(); output_ids.resize(nq * output_topk); - output_distance.clear(); - output_distance.resize(nq * output_topk); + output_distances.clear(); + output_distances.resize(nq * output_topk); - for (uint64_t i = 0; i < nq; i++) { - for (uint64_t j = 0; j < output_topk; j++) { + for (size_t i = 0; i < nq; i++) { + for (size_t j = 0; j < output_topk; j++) { output_ids[i * output_topk + j] = input_ids[i * input_topk + j]; - output_distance[i * output_topk + j] = input_distance[i * input_topk + j]; + output_distances[i * output_topk + j] = input_distances[i * input_topk + j]; } } } void -CheckTopkResult(const std::vector& input_ids_1, const std::vector& input_distance_1, - const std::vector& input_ids_2, const std::vector& input_distance_2, uint64_t topk, - uint64_t nq, bool ascending, const milvus::scheduler::ResultSet& result) { - ASSERT_EQ(result.size(), nq); - ASSERT_EQ(input_ids_1.size(), input_distance_1.size()); - ASSERT_EQ(input_ids_2.size(), input_distance_2.size()); +CheckTopkResult(const ms::ResultIds& input_ids_1, + const ms::ResultDistances& input_distances_1, + size_t input_k_1, + const ms::ResultIds& input_ids_2, + const ms::ResultDistances& input_distances_2, + size_t input_k_2, + size_t topk, + size_t nq, + bool ascending, + const ms::ResultIds& result_ids, + const ms::ResultDistances& result_distances) { + ASSERT_EQ(result_ids.size(), result_distances.size()); + ASSERT_EQ(input_ids_1.size(), input_distances_1.size()); + ASSERT_EQ(input_ids_2.size(), input_distances_2.size()); - for (int64_t i = 0; i < nq; i++) { - std::vector src_vec(input_distance_1.begin() + i * topk, input_distance_1.begin() + (i + 1) * topk); - src_vec.insert(src_vec.end(), input_distance_2.begin() + i * topk, input_distance_2.begin() + (i + 1) * topk); + size_t result_k = result_distances.size() / nq; + ASSERT_EQ(result_k, std::min(topk, input_k_1 + input_k_2)); + + for (size_t i = 0; i < nq; i++) { + std::vector + src_vec(input_distances_1.begin() + i * topk, input_distances_1.begin() + (i + 1) * topk); + src_vec.insert(src_vec.end(), + input_distances_2.begin() + i * topk, + input_distances_2.begin() + (i + 1) * topk); if (ascending) { std::sort(src_vec.begin(), src_vec.end()); } else { std::sort(src_vec.begin(), src_vec.end(), std::greater()); } - // erase invalid items + //erase invalid items std::vector::iterator iter; for (iter = src_vec.begin(); iter != src_vec.end();) { if (*iter < 0.0) @@ -94,36 +118,38 @@ CheckTopkResult(const std::vector& input_ids_1, const std::vector ids1, ids2; - std::vector dist1, dist2; - ms::ResultSet result; +MergeTopkToResultSetTest(size_t topk_1, size_t topk_2, size_t nq, 
size_t topk, bool ascending) { + ms::ResultIds ids1, ids2; + ms::ResultDistances dist1, dist2; + ms::ResultIds result_ids; + ms::ResultDistances result_distances; BuildResult(ids1, dist1, topk_1, topk, nq, ascending); BuildResult(ids2, dist2, topk_2, topk, nq, ascending); - ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result); - ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result); - CheckTopkResult(ids1, dist1, ids2, dist2, topk, nq, ascending, result); + ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result_ids, result_distances); + ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result_ids, result_distances); + CheckTopkResult(ids1, dist1, topk_1, ids2, dist2, topk_2, topk, nq, ascending, result_ids, result_distances); } TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { - uint64_t NQ = 15; - uint64_t TOP_K = 64; + size_t NQ = 15; + size_t TOP_K = 64; /* test1, id1/dist1 valid, id2/dist2 empty */ MergeTopkToResultSetTest(TOP_K, 0, NQ, TOP_K, true); @@ -142,21 +168,21 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { MergeTopkToResultSetTest(TOP_K / 2, TOP_K / 3, NQ, TOP_K, false); } -// void MergeTopkArrayTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) { +//void MergeTopkArrayTest(size_t topk_1, size_t topk_2, size_t nq, size_t topk, bool ascending) { // std::vector ids1, ids2; // std::vector dist1, dist2; // ms::ResultSet result; // BuildResult(ids1, dist1, topk_1, topk, nq, ascending); // BuildResult(ids2, dist2, topk_2, topk, nq, ascending); -// uint64_t result_topk = std::min(topk, topk_1 + topk_2); +// size_t result_topk = std::min(topk, topk_1 + topk_2); // ms::XSearchTask::MergeTopkArray(ids1, dist1, topk_1, ids2, dist2, topk_2, nq, topk, ascending); // if (ids1.size() != result_topk * nq) { // std::cout << ids1.size() << " " << result_topk * nq << std::endl; // } // ASSERT_TRUE(ids1.size() == result_topk * nq); // ASSERT_TRUE(dist1.size() == result_topk * nq); -// for (uint64_t i = 0; i < nq; i++) { -// for (uint64_t k = 1; k < result_topk; k++) { +// for (size_t i = 0; i < nq; i++) { +// for (size_t k = 1; k < result_topk; k++) { // float f0 = dist1[i * topk + k - 1]; // float f1 = dist1[i * topk + k]; // if (ascending) { @@ -174,9 +200,9 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { // } //} -// TEST(DBSearchTest, MERGE_ARRAY_TEST) { -// uint64_t NQ = 15; -// uint64_t TOP_K = 64; +//TEST(DBSearchTest, MERGE_ARRAY_TEST) { +// size_t NQ = 15; +// size_t TOP_K = 64; // // /* test1, id1/dist1 valid, id2/dist2 empty */ // MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, true); @@ -202,26 +228,26 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) { //} TEST(DBSearchTest, REDUCE_PERF_TEST) { - int32_t index_file_num = 478; /* sift1B dataset, index files num */ + int32_t index_file_num = 478; /* sift1B dataset, index files num */ bool ascending = true; - std::vector thread_vec = {4, 8}; - std::vector nq_vec = {1, 10, 100}; - std::vector topk_vec = {1, 4, 16, 64}; - int32_t NQ = nq_vec[nq_vec.size() - 1]; - int32_t TOPK = topk_vec[topk_vec.size() - 1]; + std::vector thread_vec = {4}; + std::vector nq_vec = {1000}; + std::vector topk_vec = {64}; + size_t NQ = nq_vec[nq_vec.size() - 1]; + size_t TOPK = topk_vec[topk_vec.size() - 1]; - std::vector> id_vec; - std::vector> dist_vec; - std::vector input_ids; - std::vector input_distance; + std::vector id_vec; + std::vector dist_vec; + ms::ResultIds input_ids; + ms::ResultDistances input_distances; 
int32_t i, k, step; /* generate testing data */ for (i = 0; i < index_file_num; i++) { - BuildResult(input_ids, input_distance, TOPK, TOPK, NQ, ascending); + BuildResult(input_ids, input_distances, TOPK, TOPK, NQ, ascending); id_vec.push_back(input_ids); - dist_vec.push_back(input_distance); + dist_vec.push_back(input_distances); } for (int32_t max_thread_num : thread_vec) { @@ -230,136 +256,144 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) { for (int32_t nq : nq_vec) { for (int32_t top_k : topk_vec) { - ms::ResultSet final_result, final_result_2, final_result_3; + ms::ResultIds final_result_ids, final_result_ids_2, final_result_ids_3; + ms::ResultDistances final_result_distances, final_result_distances_2, final_result_distances_3; - std::vector> id_vec_1(index_file_num); - std::vector> dist_vec_1(index_file_num); + std::vector id_vec_1(index_file_num); + std::vector dist_vec_1(index_file_num); for (i = 0; i < index_file_num; i++) { CopyResult(id_vec_1[i], dist_vec_1[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); } - std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " + std::to_string(nq) + " " + - std::to_string(top_k); + std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " + + std::to_string(nq) + " " + std::to_string(top_k); milvus::TimeRecorder rc1(str1); /////////////////////////////////////////////////////////////////////////////////////// /* method-1 */ for (i = 0; i < index_file_num; i++) { - ms::XSearchTask::MergeTopkToResultSet(id_vec_1[i], dist_vec_1[i], top_k, nq, top_k, ascending, - final_result); - ASSERT_EQ(final_result.size(), nq); + ms::XSearchTask::MergeTopkToResultSet(id_vec_1[i], + dist_vec_1[i], + top_k, + nq, + top_k, + ascending, + final_result_ids, + final_result_distances); + ASSERT_EQ(final_result_ids.size(), nq * top_k); + ASSERT_EQ(final_result_distances.size(), nq * top_k); } rc1.RecordSection("reduce done"); - // /////////////////////////////////////////////////////////////////////////////////////// - // /* method-2 */ - // std::vector> id_vec_2(index_file_num); - // std::vector> dist_vec_2(index_file_num); - // std::vector k_vec_2(index_file_num); - // for (i = 0; i < index_file_num; i++) { - // CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); - // k_vec_2[i] = top_k; - // } - // - // std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " + - // std::to_string(nq) + " " + std::to_string(top_k); - // milvus::TimeRecorder rc2(str2); - // - // for (step = 1; step < index_file_num; step *= 2) { - // for (i = 0; i + step < index_file_num; i += step * 2) { - // ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i], - // id_vec_2[i + step], dist_vec_2[i + step], - // k_vec_2[i + step], nq, top_k, ascending); - // } - // } - // ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0], - // dist_vec_2[0], - // k_vec_2[0], - // nq, - // top_k, - // ascending, - // final_result_2); - // ASSERT_EQ(final_result_2.size(), nq); - // - // rc2.RecordSection("reduce done"); - // - // for (i = 0; i < nq; i++) { - // ASSERT_EQ(final_result[i].size(), final_result_2[i].size()); - // for (k = 0; k < final_result[i].size(); k++) { - // if (final_result[i][k].first != final_result_2[i][k].first) { - // std::cout << i << " " << k << std::endl; - // } - // ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first); - // ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second); - // } - // } - // - // /////////////////////////////////////////////////////////////////////////////////////// 
- // /* method-3 parallel */ - // std::vector> id_vec_3(index_file_num); - // std::vector> dist_vec_3(index_file_num); - // std::vector k_vec_3(index_file_num); - // for (i = 0; i < index_file_num; i++) { - // CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); - // k_vec_3[i] = top_k; - // } - // - // std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " + - // std::to_string(nq) + " " + std::to_string(top_k); - // milvus::TimeRecorder rc3(str3); - // - // for (step = 1; step < index_file_num; step *= 2) { - // for (i = 0; i + step < index_file_num; i += step * 2) { - // threads_list.push_back( - // threadPool.enqueue(ms::XSearchTask::MergeTopkArray, - // std::ref(id_vec_3[i]), - // std::ref(dist_vec_3[i]), - // std::ref(k_vec_3[i]), - // std::ref(id_vec_3[i + step]), - // std::ref(dist_vec_3[i + step]), - // std::ref(k_vec_3[i + step]), - // nq, - // top_k, - // ascending)); - // } - // - // while (threads_list.size() > 0) { - // int nready = 0; - // for (auto it = threads_list.begin(); it != threads_list.end(); it = it) { - // auto &p = *it; - // std::chrono::milliseconds span(0); - // if (p.wait_for(span) == std::future_status::ready) { - // threads_list.erase(it++); - // ++nready; - // } else { - // ++it; - // } - // } - // - // if (nready == 0) { - // std::this_thread::yield(); - // } - // } - // } - // ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0], - // dist_vec_3[0], - // k_vec_3[0], - // nq, - // top_k, - // ascending, - // final_result_3); - // ASSERT_EQ(final_result_3.size(), nq); - // - // rc3.RecordSection("reduce done"); - // - // for (i = 0; i < nq; i++) { - // ASSERT_EQ(final_result[i].size(), final_result_3[i].size()); - // for (k = 0; k < final_result[i].size(); k++) { - // ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first); - // ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second); - // } - // } +// /////////////////////////////////////////////////////////////////////////////////////// +// /* method-2 */ +// std::vector> id_vec_2(index_file_num); +// std::vector> dist_vec_2(index_file_num); +// std::vector k_vec_2(index_file_num); +// for (i = 0; i < index_file_num; i++) { +// CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); +// k_vec_2[i] = top_k; +// } +// +// std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " + +// std::to_string(nq) + " " + std::to_string(top_k); +// milvus::TimeRecorder rc2(str2); +// +// for (step = 1; step < index_file_num; step *= 2) { +// for (i = 0; i + step < index_file_num; i += step * 2) { +// ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i], +// id_vec_2[i + step], dist_vec_2[i + step], k_vec_2[i + step], +// nq, top_k, ascending); +// } +// } +// ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0], +// dist_vec_2[0], +// k_vec_2[0], +// nq, +// top_k, +// ascending, +// final_result_2); +// ASSERT_EQ(final_result_2.size(), nq); +// +// rc2.RecordSection("reduce done"); +// +// for (i = 0; i < nq; i++) { +// ASSERT_EQ(final_result[i].size(), final_result_2[i].size()); +// for (k = 0; k < final_result[i].size(); k++) { +// if (final_result[i][k].first != final_result_2[i][k].first) { +// std::cout << i << " " << k << std::endl; +// } +// ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first); +// ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second); +// } +// } +// +// /////////////////////////////////////////////////////////////////////////////////////// +// /* method-3 
parallel */ +// std::vector> id_vec_3(index_file_num); +// std::vector> dist_vec_3(index_file_num); +// std::vector k_vec_3(index_file_num); +// for (i = 0; i < index_file_num; i++) { +// CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq); +// k_vec_3[i] = top_k; +// } +// +// std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " + +// std::to_string(nq) + " " + std::to_string(top_k); +// milvus::TimeRecorder rc3(str3); +// +// for (step = 1; step < index_file_num; step *= 2) { +// for (i = 0; i + step < index_file_num; i += step * 2) { +// threads_list.push_back( +// threadPool.enqueue(ms::XSearchTask::MergeTopkArray, +// std::ref(id_vec_3[i]), +// std::ref(dist_vec_3[i]), +// std::ref(k_vec_3[i]), +// std::ref(id_vec_3[i + step]), +// std::ref(dist_vec_3[i + step]), +// std::ref(k_vec_3[i + step]), +// nq, +// top_k, +// ascending)); +// } +// +// while (threads_list.size() > 0) { +// int nready = 0; +// for (auto it = threads_list.begin(); it != threads_list.end(); it = it) { +// auto &p = *it; +// std::chrono::milliseconds span(0); +// if (p.wait_for(span) == std::future_status::ready) { +// threads_list.erase(it++); +// ++nready; +// } else { +// ++it; +// } +// } +// +// if (nready == 0) { +// std::this_thread::yield(); +// } +// } +// } +// ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0], +// dist_vec_3[0], +// k_vec_3[0], +// nq, +// top_k, +// ascending, +// final_result_3); +// ASSERT_EQ(final_result_3.size(), nq); +// +// rc3.RecordSection("reduce done"); +// +// for (i = 0; i < nq; i++) { +// ASSERT_EQ(final_result[i].size(), final_result_3[i].size()); +// for (k = 0; k < final_result[i].size(); k++) { +// ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first); +// ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second); +// } +// } } } } diff --git a/core/unittest/metrics/test_metrics.cpp b/core/unittest/metrics/test_metrics.cpp index 171aac17ee..10410a648d 100644 --- a/core/unittest/metrics/test_metrics.cpp +++ b/core/unittest/metrics/test_metrics.cpp @@ -15,19 +15,19 @@ // specific language governing permissions and limitations // under the License. 
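A note on the reduce path exercised by DBSearchTest above: MergeTopkToResultSet folds each index file's per-query topk block into the running result. The sketch below shows the per-query invariant the test checks — two lists sorted by ascending distance, tail slots padded with id -1 (as BuildResult does), merged into at most topk entries. This illustrates the invariant under those assumptions; it is not the engine's implementation:

    #include <cstdint>
    #include <vector>

    // Merge two ascending-by-distance topk lists for a single query.
    // Unused tail slots are marked with id -1 and skipped.
    void MergeTwoTopk(const std::vector<int64_t>& ids1, const std::vector<float>& dist1,
                      const std::vector<int64_t>& ids2, const std::vector<float>& dist2,
                      size_t topk,
                      std::vector<int64_t>& out_ids, std::vector<float>& out_dist) {
        size_t i = 0, j = 0;
        out_ids.clear();
        out_dist.clear();
        while (out_ids.size() < topk) {
            bool valid1 = i < ids1.size() && ids1[i] != -1;
            bool valid2 = j < ids2.size() && ids2[j] != -1;
            if (!valid1 && !valid2) {
                break;  // both exhausted: result may hold fewer than topk entries
            }
            if (valid1 && (!valid2 || dist1[i] <= dist2[j])) {
                out_ids.push_back(ids1[i]);
                out_dist.push_back(dist1[i]);
                ++i;
            } else {
                out_ids.push_back(ids2[j]);
                out_dist.push_back(dist2[j]);
                ++j;
            }
        }
    }

    int main() {
        std::vector<int64_t> ids1 = {1, 2, -1};
        std::vector<float> dist1 = {0.1f, 0.4f, -1.0f};
        std::vector<int64_t> ids2 = {3, 4, 5};
        std::vector<float> dist2 = {0.2f, 0.3f, 0.5f};
        std::vector<int64_t> out_ids;
        std::vector<float> out_dist;
        MergeTwoTopk(ids1, dist1, ids2, dist2, 4, out_ids, out_dist);
        // out_ids is now {1, 3, 4, 2}: the four smallest distances overall.
        return out_ids.size() == 4 ? 0 : 1;
    }
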
-#include #include #include #include #include #include +#include #include "cache/CpuCacheMgr.h" -#include "db/DB.h" -#include "db/meta/SqliteMetaImpl.h" +#include "server/Config.h" #include "metrics/Metrics.h" #include "metrics/utils.h" -#include "server/Config.h" +#include "db/DB.h" +#include "db/meta/SqliteMetaImpl.h" TEST_F(MetricTest, METRIC_TEST) { milvus::server::Config::GetInstance().SetMetricConfigCollector("zabbix"); @@ -36,15 +36,15 @@ TEST_F(MetricTest, METRIC_TEST) { milvus::server::Metrics::GetInstance(); milvus::server::SystemInfo::GetInstance().Init(); - // server::Metrics::GetInstance().Init(); - // server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr()); +// server::Metrics::GetInstance().Init(); +// server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr()); milvus::server::Metrics::GetInstance().Init(); - // server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr()); +// server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr()); milvus::cache::CpuCacheMgr::GetInstance()->SetCapacity(1UL * 1024 * 1024 * 1024); std::cout << milvus::cache::CpuCacheMgr::GetInstance()->CacheCapacity() << std::endl; - static const char* group_name = "test_group"; + static const char *group_name = "test_group"; static const int group_dim = 256; milvus::engine::meta::TableSchema group_info; @@ -61,21 +61,23 @@ TEST_F(MetricTest, METRIC_TEST) { int d = 256; int nb = 50; - float* xb = new float[d * nb]; + float *xb = new float[d * nb]; for (int i = 0; i < nb; i++) { for (int j = 0; j < d; j++) xb[d * i + j] = drand48(); xb[d * i] += i / 2000.; } int qb = 5; - float* qxb = new float[d * qb]; + float *qxb = new float[d * qb]; for (int i = 0; i < qb; i++) { for (int j = 0; j < d; j++) qxb[d * i + j] = drand48(); qxb[d * i] += i / 2000.; } std::thread search([&]() { - milvus::engine::QueryResults results; +// std::vector tags; +// milvus::engine::ResultIds result_ids; +// milvus::engine::ResultDistances result_distances; int k = 10; std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -90,16 +92,17 @@ TEST_F(MetricTest, METRIC_TEST) { prev_count = count; START_TIMER; - // stat = db_->Query(group_name, k, qb, qxb, results); - ss << "Search " << j << " With Size " << (float)(count * group_dim * sizeof(float)) / (1024 * 1024) << " M"; +// stat = db_->Query(group_name, tags, k, qb, qxb, result_ids, result_distances); + ss << "Search " << j << " With Size " << (float) (count * group_dim * sizeof(float)) / (1024 * 1024) + << " M"; for (auto k = 0; k < qb; ++k) { - // ASSERT_EQ(results[k][0].first, target_ids[k]); +// ASSERT_EQ(results[k][0].first, target_ids[k]); ss.str(""); ss << "Result [" << k << "]:"; - // for (auto result : results[k]) { - // ss << result.first << " "; - // } +// for (auto result : results[k]) { +// ss << result.first << " "; +// } } ASSERT_TRUE(count >= prev_count); std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -110,10 +113,10 @@ TEST_F(MetricTest, METRIC_TEST) { for (auto i = 0; i < loop; ++i) { if (i == 40) { - db_->InsertVectors(group_name, qb, qxb, target_ids); + db_->InsertVectors(group_name, "", qb, qxb, target_ids); ASSERT_EQ(target_ids.size(), qb); } else { - db_->InsertVectors(group_name, nb, xb, vector_ids); + db_->InsertVectors(group_name, "", nb, xb, vector_ids); } 
std::this_thread::sleep_for(std::chrono::microseconds(2000)); } @@ -152,3 +155,5 @@ TEST_F(MetricTest, COLLECTOR_METRICS_TEST) { milvus::server::MetricCollector metric_collector(); } + + diff --git a/core/unittest/server/test_rpc.cpp b/core/unittest/server/test_rpc.cpp index 100613db7a..4d5b9e3567 100644 --- a/core/unittest/server/test_rpc.cpp +++ b/core/unittest/server/test_rpc.cpp @@ -380,6 +380,44 @@ TEST_F(RpcHandlerTest, TABLES_TEST) { ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS); } +TEST_F(RpcHandlerTest, PARTITION_TEST) { + ::grpc::ServerContext context; + ::milvus::grpc::TableSchema table_schema; + ::milvus::grpc::Status response; + std::string str_table_name = "tbl_partition"; + table_schema.set_table_name(str_table_name); + table_schema.set_dimension(TABLE_DIM); + table_schema.set_index_file_size(INDEX_FILE_SIZE); + table_schema.set_metric_type(1); + handler->CreateTable(&context, &table_schema, &response); + + ::milvus::grpc::PartitionParam partition_param; + partition_param.set_table_name(str_table_name); + std::string partition_name = "tbl_partition_0"; + partition_param.set_partition_name(partition_name); + std::string partition_tag = "0"; + partition_param.set_tag(partition_tag); + handler->CreatePartition(&context, &partition_param, &response); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); + + ::milvus::grpc::TableName table_name; + table_name.set_table_name(str_table_name); + ::milvus::grpc::PartitionList partition_list; + handler->ShowPartitions(&context, &table_name, &partition_list); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); + ASSERT_EQ(partition_list.partition_array_size(), 1); + + ::milvus::grpc::PartitionParam partition_parm; + partition_parm.set_table_name(str_table_name); + partition_parm.set_tag(partition_tag); + handler->DropPartition(&context, &partition_parm, &response); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); + + partition_parm.set_partition_name(partition_name); + handler->DropPartition(&context, &partition_parm, &response); + ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code()); +} + TEST_F(RpcHandlerTest, CMD_TEST) { ::grpc::ServerContext context; ::milvus::grpc::Command command; @@ -396,26 +434,26 @@ TEST_F(RpcHandlerTest, CMD_TEST) { TEST_F(RpcHandlerTest, DELETE_BY_RANGE_TEST) { ::grpc::ServerContext context; - ::milvus::grpc::DeleteByRangeParam request; + ::milvus::grpc::DeleteByDateParam request; ::milvus::grpc::Status status; - handler->DeleteByRange(&context, nullptr, &status); - handler->DeleteByRange(&context, &request, &status); + handler->DeleteByDate(&context, nullptr, &status); + handler->DeleteByDate(&context, &request, &status); request.set_table_name(TABLE_NAME); request.mutable_range()->set_start_value(CurrentTmDate(-3)); request.mutable_range()->set_end_value(CurrentTmDate(-2)); - ::grpc::Status grpc_status = handler->DeleteByRange(&context, &request, &status); + ::grpc::Status grpc_status = handler->DeleteByDate(&context, &request, &status); int error_code = status.error_code(); // ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS); request.mutable_range()->set_start_value("test6"); - grpc_status = handler->DeleteByRange(&context, &request, &status); + grpc_status = handler->DeleteByDate(&context, &request, &status); request.mutable_range()->set_start_value(CurrentTmDate(-2)); request.mutable_range()->set_end_value("test6"); - grpc_status = handler->DeleteByRange(&context, &request, &status); + grpc_status = 
handler->DeleteByDate(&context, &request, &status); request.mutable_range()->set_end_value(CurrentTmDate(-2)); - grpc_status = handler->DeleteByRange(&context, &request, &status); + grpc_status = handler->DeleteByDate(&context, &request, &status); } ////////////////////////////////////////////////////////////////////// diff --git a/core/unittest/server/test_util.cpp b/core/unittest/server/test_util.cpp index 36d0ab8597..9677c90730 100644 --- a/core/unittest/server/test_util.cpp +++ b/core/unittest/server/test_util.cpp @@ -136,6 +136,10 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) { status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result); ASSERT_TRUE(status.ok()); ASSERT_EQ(result.size(), 3UL); + + ASSERT_TRUE(milvus::server::StringHelpFunctions::IsRegexMatch("abc", "abc")); + ASSERT_TRUE(milvus::server::StringHelpFunctions::IsRegexMatch("a8c", "a\\d.")); + ASSERT_FALSE(milvus::server::StringHelpFunctions::IsRegexMatch("abc", "a\\dc")); } TEST(UtilTest, BLOCKINGQUEUE_TEST) { @@ -314,6 +318,13 @@ TEST(ValidationUtilTest, VALIDATE_NPROBE_TEST) { ASSERT_NE(milvus::server::ValidationUtil::ValidateSearchNprobe(101, schema).code(), milvus::SERVER_SUCCESS); } +TEST(ValidationUtilTest, VALIDATE_PARTITION_TAGS) { + std::vector partition_tags = {"abc"}; + ASSERT_EQ(milvus::server::ValidationUtil::ValidatePartitionTags(partition_tags).code(), milvus::SERVER_SUCCESS); + partition_tags.push_back(""); + ASSERT_NE(milvus::server::ValidationUtil::ValidatePartitionTags(partition_tags).code(), milvus::SERVER_SUCCESS); +} + #ifdef MILVUS_GPU_VERSION TEST(ValidationUtilTest, VALIDATE_GPU_TEST) { ASSERT_EQ(milvus::server::ValidationUtil::ValidateGpuIndex(0).code(), milvus::SERVER_SUCCESS); From 57e3c14825c1122513b73c81fd1b86a5d8f138d2 Mon Sep 17 00:00:00 2001 From: groot Date: Thu, 7 Nov 2019 18:46:27 +0800 Subject: [PATCH 143/196] fix unittest failure --- core/src/server/Config.cpp | 2 ++ core/unittest/server/test_config.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/core/src/server/Config.cpp b/core/src/server/Config.cpp index f241e661c8..f130e73a85 100644 --- a/core/src/server/Config.cpp +++ b/core/src/server/Config.cpp @@ -307,6 +307,7 @@ Config::ResetDefaultConfig() { return s; } +#ifdef MILVUS_GPU_VERSION s = SetCacheConfigGpuCacheCapacity(CONFIG_CACHE_GPU_CACHE_CAPACITY_DEFAULT); if (!s.ok()) { return s; @@ -316,6 +317,7 @@ Config::ResetDefaultConfig() { if (!s.ok()) { return s; } +#endif s = SetCacheConfigCacheInsertData(CONFIG_CACHE_CACHE_INSERT_DATA_DEFAULT); if (!s.ok()) { diff --git a/core/unittest/server/test_config.cpp b/core/unittest/server/test_config.cpp index 5e6f61e543..6a62ddd97c 100644 --- a/core/unittest/server/test_config.cpp +++ b/core/unittest/server/test_config.cpp @@ -216,6 +216,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) { s = config.GetCacheConfigCpuCacheThreshold(float_val); ASSERT_TRUE(float_val == cache_cpu_cache_threshold); +#ifdef MILVUS_GPU_VERSION int64_t cache_gpu_cache_capacity = 1; s = config.SetCacheConfigGpuCacheCapacity(std::to_string(cache_gpu_cache_capacity)); ASSERT_TRUE(s.ok()); @@ -228,6 +229,7 @@ TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) { ASSERT_TRUE(s.ok()); s = config.GetCacheConfigGpuCacheThreshold(float_val); ASSERT_TRUE(float_val == cache_gpu_cache_threshold); +#endif bool cache_insert_data = true; s = config.SetCacheConfigCacheInsertData(std::to_string(cache_insert_data)); From a76d223a255392da6a03019ec3f1b489c05331b1 Mon Sep 17 00:00:00 2001 From: groot Date: Fri, 8 Nov 2019 12:11:11 +0800 Subject: [PATCH 
144/196] #246 Exclude src/external folder from code coverage for Jenkins CI --- ci/jenkins/scripts/coverage.sh | 3 +-- core/coverage.sh | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/ci/jenkins/scripts/coverage.sh b/ci/jenkins/scripts/coverage.sh index 07ab210d2f..5c9d010d46 100755 --- a/ci/jenkins/scripts/coverage.sh +++ b/ci/jenkins/scripts/coverage.sh @@ -132,8 +132,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \ "*/src/server/Server.cpp" \ "*/src/server/DBWrapper.cpp" \ "*/src/server/grpc_impl/GrpcServer.cpp" \ - "*/src/external/easyloggingpp/easylogging++.h" \ - "*/src/external/easyloggingpp/easylogging++.cc" + "*/src/external/*" if [ $? -ne 0 ]; then echo "gen ${FILE_INFO_OUTPUT_NEW} failed" diff --git a/core/coverage.sh b/core/coverage.sh index 2cb0861de4..9011e290e5 100755 --- a/core/coverage.sh +++ b/core/coverage.sh @@ -122,8 +122,6 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \ "*/src/server/Server.cpp" \ "*/src/server/DBWrapper.cpp" \ "*/src/server/grpc_impl/GrpcServer.cpp" \ - "*/easylogging++.h" \ - "*/easylogging++.cc" \ "*/src/external/*" if [ $? -ne 0 ]; then From 258fd600fae81b488d13b3f1a14fc8a995a2a73e Mon Sep 17 00:00:00 2001 From: groot Date: Fri, 8 Nov 2019 12:19:44 +0800 Subject: [PATCH 145/196] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 68bb32f539..906799bcfe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ Please mark all change in change log and use the ticket from JIRA. # Milvus 0.6.0 (TODO) ## Bug +- \#246 - Exclude src/external folder from code coverage for Jenkins CI ## Feature - \#12 - Pure CPU version for Milvus - \#77 - Support table partition - \#226 - Experimental shards middleware for Milvus From d4372d28b709956b0f736bb22943cae4e2e7ff70 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 11:12:59 +0800 Subject: [PATCH 146/196] use ccache when compiling (if available) --- core/CMakeLists.txt | 12 ++++++++++++ core/cmake/DefineOptions.cmake | 2 ++ 2 files changed, 14 insertions(+) diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 7b6a115527..65094b19a3 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -118,6 +118,18 @@ include(DefineOptions) include(BuildUtils) include(ThirdPartyPackages) +if(MILVUS_USE_CCACHE) + find_program(CCACHE_FOUND ccache) + if(CCACHE_FOUND) + message(STATUS "Using ccache: ${CCACHE_FOUND}") + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND}) + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND}) + # let ccache preserve C++ comments, because some of them may be + # meaningful to the compiler + set(ENV{CCACHE_COMMENTS} "1") + endif(CCACHE_FOUND) +endif() + set(MILVUS_CPU_VERSION false) if (MILVUS_GPU_VERSION) message(STATUS "Building Milvus GPU version") diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index 6e05a12dd2..0050de75eb 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -57,6 +57,8 @@ define_option_string(MILVUS_DEPENDENCY_SOURCE "BUNDLED" "SYSTEM") +define_option(MILVUS_USE_CCACHE "Use ccache when compiling (if available)" ON) + define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD "Show output from ExternalProjects rather than just logging to files" ON) From 57768e77249f28f6d4a93220b34460b0d657da07 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 11:28:57 +0800 Subject: [PATCH 147/196] add ccache cache in .travis.yml --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index
ec3b1c41fa..ee21c874dd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ language: cpp +cache: ccache sudo: required dist: bionic addons: From d94ebbca97ebc1b456f86a38b9aef2455498d04f Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 11:51:06 +0800 Subject: [PATCH 148/196] add before-install.sh in travis --- .travis.yml | 6 ++++++ ci/travis/before-install.sh | 12 ++++++++++++ 2 files changed, 18 insertions(+) create mode 100755 ci/travis/before-install.sh diff --git a/.travis.yml b/.travis.yml index ee21c874dd..aaa390620f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,10 +2,16 @@ language: cpp cache: ccache sudo: required dist: bionic + addons: apt: update: true + +before_install: + - source ci/travis/before-install.sh + install: - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh + script: - $TRAVIS_BUILD_DIR/ci/travis/travis_build.sh diff --git a/ci/travis/before-install.sh b/ci/travis/before-install.sh new file mode 100755 index 0000000000..4ded8b8e38 --- /dev/null +++ b/ci/travis/before-install.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -ex + +if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then + export CCACHE_COMPRESS=1 + export CCACHE_COMPRESSLEVEL=5 + export CCACHE_COMPILERCHECK=content + ccache --show-stats +fi + +set +ex From b837d135550b409751ac47ceed1d72710129efc0 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 14:12:37 +0800 Subject: [PATCH 149/196] update .travis.yml --- .travis.yml | 5 ++++- ci/travis/before-install.sh | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index aaa390620f..c4729a2555 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,11 @@ language: cpp -cache: ccache sudo: required dist: bionic +cache: + directories: + - $HOME/.ccache + addons: apt: update: true diff --git a/ci/travis/before-install.sh b/ci/travis/before-install.sh index 4ded8b8e38..70133cfe1d 100755 --- a/ci/travis/before-install.sh +++ b/ci/travis/before-install.sh @@ -6,6 +6,7 @@ if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then export CCACHE_COMPRESS=1 export CCACHE_COMPRESSLEVEL=5 export CCACHE_COMPILERCHECK=content + export PATH=/usr/lib/ccache/:$PATH ccache --show-stats fi From ac61309c649a39031e9b08ceb1ccccf6a68701de Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 14:15:34 +0800 Subject: [PATCH 150/196] update ci/travis/install_dependency.sh --- ci/travis/install_dependency.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index cec4e9c30f..54e2f41ab5 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -18,6 +18,7 @@ sudo apt-get install -y -q --no-install-recommends \ lsb-core \ libtool \ automake \ + ccache \ pkg-config \ libboost-filesystem-dev \ libboost-system-dev \ From ba2548a93638f8d413c5fbaf8ac0a841911ac496 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 16:44:31 +0800 Subject: [PATCH 151/196] add FindArrow.cmake --- core/src/index/cmake/FindArrow.cmake | 431 ++++++++++++++++++ .../index/cmake/ThirdPartyPackagesCore.cmake | 24 +- 2 files changed, 440 insertions(+), 15 deletions(-) create mode 100644 core/src/index/cmake/FindArrow.cmake diff --git a/core/src/index/cmake/FindArrow.cmake b/core/src/index/cmake/FindArrow.cmake new file mode 100644 index 0000000000..fdf7c1437f --- /dev/null +++ b/core/src/index/cmake/FindArrow.cmake @@ -0,0 +1,431 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# - Find Arrow (arrow/api.h, libarrow.a, libarrow.so) +# This module defines +# ARROW_FOUND, whether Arrow has been found +# ARROW_FULL_SO_VERSION, full shared object version of found Arrow "100.0.0" +# ARROW_IMPORT_LIB, path to libarrow's import library (Windows only) +# ARROW_INCLUDE_DIR, directory containing headers +# ARROW_LIBS, deprecated. Use ARROW_LIB_DIR instead +# ARROW_LIB_DIR, directory containing Arrow libraries +# ARROW_SHARED_IMP_LIB, deprecated. Use ARROW_IMPORT_LIB instead +# ARROW_SHARED_LIB, path to libarrow's shared library +# ARROW_SO_VERSION, shared object version of found Arrow such as "100" +# ARROW_STATIC_LIB, path to libarrow.a +# ARROW_VERSION, version of found Arrow +# ARROW_VERSION_MAJOR, major version of found Arrow +# ARROW_VERSION_MINOR, minor version of found Arrow +# ARROW_VERSION_PATCH, patch version of found Arrow + +include(FindPkgConfig) +include(FindPackageHandleStandardArgs) + +set(ARROW_SEARCH_LIB_PATH_SUFFIXES) +if(CMAKE_LIBRARY_ARCHITECTURE) + list(APPEND ARROW_SEARCH_LIB_PATH_SUFFIXES "lib/${CMAKE_LIBRARY_ARCHITECTURE}") +endif() +list(APPEND ARROW_SEARCH_LIB_PATH_SUFFIXES + "lib64" + "lib32" + "lib" + "bin") +set(ARROW_CONFIG_SUFFIXES + "_RELEASE" + "_RELWITHDEBINFO" + "_MINSIZEREL" + "_DEBUG" + "") +if(CMAKE_BUILD_TYPE) + string(TOUPPER ${CMAKE_BUILD_TYPE} ARROW_CONFIG_SUFFIX_PREFERRED) + set(ARROW_CONFIG_SUFFIX_PREFERRED "_${ARROW_CONFIG_SUFFIX_PREFERRED}") + list(INSERT ARROW_CONFIG_SUFFIXES 0 "${ARROW_CONFIG_SUFFIX_PREFERRED}") +endif() + +if(NOT DEFINED ARROW_MSVC_STATIC_LIB_SUFFIX) + if(MSVC) + set(ARROW_MSVC_STATIC_LIB_SUFFIX "_static") + else() + set(ARROW_MSVC_STATIC_LIB_SUFFIX "") + endif() +endif() + +# Internal function. +# +# Set shared library name for ${base_name} to ${output_variable}. +# +# Example: +# arrow_build_shared_library_name(ARROW_SHARED_LIBRARY_NAME arrow) +# # -> ARROW_SHARED_LIBRARY_NAME=libarrow.so on Linux +# # -> ARROW_SHARED_LIBRARY_NAME=libarrow.dylib on macOS +# # -> ARROW_SHARED_LIBRARY_NAME=arrow.dll with MSVC on Windows +# # -> ARROW_SHARED_LIBRARY_NAME=libarrow.dll with MinGW on Windows +function(arrow_build_shared_library_name output_variable base_name) + set(${output_variable} + "${CMAKE_SHARED_LIBRARY_PREFIX}${base_name}${CMAKE_SHARED_LIBRARY_SUFFIX}" + PARENT_SCOPE) +endfunction() + +# Internal function. +# +# Set import library name for ${base_name} to ${output_variable}. +# This is useful only for MSVC build. Import library is used only +# with MSVC build. 
+# +# Example: +# arrow_build_import_library_name(ARROW_IMPORT_LIBRARY_NAME arrow) +# # -> ARROW_IMPORT_LIBRARY_NAME=arrow on Linux (meaningless) +# # -> ARROW_IMPORT_LIBRARY_NAME=arrow on macOS (meaningless) +# # -> ARROW_IMPORT_LIBRARY_NAME=arrow.lib with MSVC on Windows +# # -> ARROW_IMPORT_LIBRARY_NAME=libarrow.dll.a with MinGW on Windows +function(arrow_build_import_library_name output_variable base_name) + set(${output_variable} + "${CMAKE_IMPORT_LIBRARY_PREFIX}${base_name}${CMAKE_IMPORT_LIBRARY_SUFFIX}" + PARENT_SCOPE) +endfunction() + +# Internal function. +# +# Set static library name for ${base_name} to ${output_variable}. +# +# Example: +# arrow_build_static_library_name(ARROW_STATIC_LIBRARY_NAME arrow) +# # -> ARROW_STATIC_LIBRARY_NAME=libarrow.a on Linux +# # -> ARROW_STATIC_LIBRARY_NAME=libarrow.a on macOS +# # -> ARROW_STATIC_LIBRARY_NAME=arrow.lib with MSVC on Windows +# # -> ARROW_STATIC_LIBRARY_NAME=libarrow.dll.a with MinGW on Windows +function(arrow_build_static_library_name output_variable base_name) + set( + ${output_variable} + "${CMAKE_STATIC_LIBRARY_PREFIX}${base_name}${ARROW_MSVC_STATIC_LIB_SUFFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}" + PARENT_SCOPE) +endfunction() + +# Internal function. +# +# Set macro value for ${macro_name} in ${header_content} to ${output_variable}. +# +# Example: +# arrow_extract_macro_value(version_major +# "ARROW_VERSION_MAJOR" +# "#define ARROW_VERSION_MAJOR 1.0.0") +# # -> version_major=1.0.0 +function(arrow_extract_macro_value output_variable macro_name header_content) + string(REGEX MATCH "#define +${macro_name} +[^\r\n]+" macro_definition + "${header_content}") + string(REGEX + REPLACE "^#define +${macro_name} +(.+)$" "\\1" macro_value "${macro_definition}") + set(${output_variable} "${macro_value}" PARENT_SCOPE) +endfunction() + +# Internal macro only for arrow_find_package. +# +# Find package in HOME. +macro(arrow_find_package_home) + find_path(${prefix}_include_dir "${header_path}" + PATHS "${home}" + PATH_SUFFIXES "include" + NO_DEFAULT_PATH) + set(include_dir "${${prefix}_include_dir}") + set(${prefix}_INCLUDE_DIR "${include_dir}" PARENT_SCOPE) + + if(MSVC) + set(CMAKE_SHARED_LIBRARY_SUFFIXES_ORIGINAL ${CMAKE_FIND_LIBRARY_SUFFIXES}) + # .dll isn't found by find_library with MSVC because .dll isn't included in + # CMAKE_FIND_LIBRARY_SUFFIXES. 
+ list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "${CMAKE_SHARED_LIBRARY_SUFFIX}") + endif() + find_library(${prefix}_shared_lib + NAMES "${shared_lib_name}" + PATHS "${home}" + PATH_SUFFIXES ${ARROW_SEARCH_LIB_PATH_SUFFIXES} + NO_DEFAULT_PATH) + if(MSVC) + set(CMAKE_SHARED_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES_ORIGINAL}) + endif() + set(shared_lib "${${prefix}_shared_lib}") + set(${prefix}_SHARED_LIB "${shared_lib}" PARENT_SCOPE) + if(shared_lib) + add_library(${target_shared} SHARED IMPORTED) + set_target_properties(${target_shared} PROPERTIES IMPORTED_LOCATION "${shared_lib}") + if(include_dir) + set_target_properties(${target_shared} + PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${include_dir}") + endif() + find_library(${prefix}_import_lib + NAMES "${import_lib_name}" + PATHS "${home}" + PATH_SUFFIXES ${ARROW_SEARCH_LIB_PATH_SUFFIXES} + NO_DEFAULT_PATH) + set(import_lib "${${prefix}_import_lib}") + set(${prefix}_IMPORT_LIB "${import_lib}" PARENT_SCOPE) + if(import_lib) + set_target_properties(${target_shared} PROPERTIES IMPORTED_IMPLIB "${import_lib}") + endif() + endif() + + find_library(${prefix}_static_lib + NAMES "${static_lib_name}" + PATHS "${home}" + PATH_SUFFIXES ${ARROW_SEARCH_LIB_PATH_SUFFIXES} + NO_DEFAULT_PATH) + set(static_lib "${${prefix}_static_lib}") + set(${prefix}_STATIC_LIB "${static_lib}" PARENT_SCOPE) + if(static_lib) + add_library(${target_static} STATIC IMPORTED) + set_target_properties(${target_static} PROPERTIES IMPORTED_LOCATION "${static_lib}") + if(include_dir) + set_target_properties(${target_static} + PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${include_dir}") + endif() + endif() +endmacro() + +# Internal macro only for arrow_find_package. +# +# Find package by CMake package configuration. +macro(arrow_find_package_cmake_package_configuration) + # ARROW-5575: We need to split target files for each component + if(TARGET ${target_shared} OR TARGET ${target_static}) + set(${cmake_package_name}_FOUND TRUE) + else() + find_package(${cmake_package_name} CONFIG) + endif() + if(${cmake_package_name}_FOUND) + set(${prefix}_USE_CMAKE_PACKAGE_CONFIG TRUE PARENT_SCOPE) + if(TARGET ${target_shared}) + foreach(suffix ${ARROW_CONFIG_SUFFIXES}) + get_target_property(shared_lib ${target_shared} IMPORTED_LOCATION${suffix}) + if(shared_lib) + # Remove shared library version: + # libarrow.so.100.0.0 -> libarrow.so + # Because ARROW_HOME and pkg-config approaches don't add + # shared library version. + string(REGEX + REPLACE "(${CMAKE_SHARED_LIBRARY_SUFFIX})[.0-9]+$" "\\1" shared_lib + "${shared_lib}") + set(${prefix}_SHARED_LIB "${shared_lib}" PARENT_SCOPE) + break() + endif() + endforeach() + endif() + if(TARGET ${target_static}) + foreach(suffix ${ARROW_CONFIG_SUFFIXES}) + get_target_property(static_lib ${target_static} IMPORTED_LOCATION${suffix}) + if(static_lib) + set(${prefix}_STATIC_LIB "${static_lib}" PARENT_SCOPE) + break() + endif() + endforeach() + endif() + endif() +endmacro() + +# Internal macro only for arrow_find_package. +# +# Find package by pkg-config. +macro(arrow_find_package_pkg_config) + pkg_check_modules(${prefix}_PC ${pkg_config_name}) + if(${prefix}_PC_FOUND) + set(${prefix}_USE_PKG_CONFIG TRUE PARENT_SCOPE) + + set(include_dir "${${prefix}_PC_INCLUDEDIR}") + set(lib_dir "${${prefix}_PC_LIBDIR}") + set(shared_lib_paths "${${prefix}_PC_LINK_LIBRARIES}") + # Use the first shared library path as the IMPORTED_LOCATION + # for ${target_shared}. This assumes that the first shared library + # path is the shared library path for this module. 
+ list(GET shared_lib_paths 0 first_shared_lib_path) + # Use the rest shared library paths as the INTERFACE_LINK_LIBRARIES + # for ${target_shared}. This assumes that the rest shared library + # paths are dependency library paths for this module. + list(LENGTH shared_lib_paths n_shared_lib_paths) + if(n_shared_lib_paths LESS_EQUAL 1) + set(rest_shared_lib_paths) + else() + list(SUBLIST + shared_lib_paths + 1 + -1 + rest_shared_lib_paths) + endif() + + set(${prefix}_VERSION "${${prefix}_PC_VERSION}" PARENT_SCOPE) + set(${prefix}_INCLUDE_DIR "${include_dir}" PARENT_SCOPE) + set(${prefix}_SHARED_LIB "${first_shared_lib_path}" PARENT_SCOPE) + + add_library(${target_shared} SHARED IMPORTED) + set_target_properties(${target_shared} + PROPERTIES INTERFACE_INCLUDE_DIRECTORIES + "${include_dir}" + INTERFACE_LINK_LIBRARIES + "${rest_shared_lib_paths}" + IMPORTED_LOCATION + "${first_shared_lib_path}") + + find_library(${prefix}_static_lib + NAMES "${static_lib_name}" + PATHS "${lib_dir}" + NO_DEFAULT_PATH) + set(static_lib "${${prefix}_static_lib}") + set(${prefix}_STATIC_LIB "${static_lib}" PARENT_SCOPE) + if(static_lib) + add_library(${target_static} STATIC IMPORTED) + set_target_properties(${target_static} + PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${include_dir}" + IMPORTED_LOCATION "${static_lib}") + endif() + endif() +endmacro() + +function(arrow_find_package + prefix + home + base_name + header_path + cmake_package_name + pkg_config_name) + arrow_build_shared_library_name(shared_lib_name ${base_name}) + arrow_build_import_library_name(import_lib_name ${base_name}) + arrow_build_static_library_name(static_lib_name ${base_name}) + + set(target_shared ${base_name}_shared) + set(target_static ${base_name}_static) + + if(home) + arrow_find_package_home() + set(${prefix}_FIND_APPROACH "HOME: ${home}" PARENT_SCOPE) + else() + arrow_find_package_cmake_package_configuration() + if(${cmake_package_name}_FOUND) + set(${prefix}_FIND_APPROACH + "CMake package configuration: ${cmake_package_name}" + PARENT_SCOPE) + else() + arrow_find_package_pkg_config() + set(${prefix}_FIND_APPROACH "pkg-config: ${pkg_config_name}" PARENT_SCOPE) + endif() + endif() + + if(NOT include_dir) + if(TARGET ${target_shared}) + get_target_property(include_dir ${target_shared} INTERFACE_INCLUDE_DIRECTORIES) + elseif(TARGET ${target_static}) + get_target_property(include_dir ${target_static} INTERFACE_INCLUDE_DIRECTORIES) + endif() + endif() + if(include_dir) + set(${prefix}_INCLUDE_DIR "${include_dir}" PARENT_SCOPE) + endif() + + if(shared_lib) + get_filename_component(lib_dir "${shared_lib}" DIRECTORY) + elseif(static_lib) + get_filename_component(lib_dir "${static_lib}" DIRECTORY) + else() + set(lib_dir NOTFOUND) + endif() + set(${prefix}_LIB_DIR "${lib_dir}" PARENT_SCOPE) + # For backward compatibility + set(${prefix}_LIBS "${lib_dir}" PARENT_SCOPE) +endfunction() + +if(NOT "$ENV{ARROW_HOME}" STREQUAL "") + file(TO_CMAKE_PATH "$ENV{ARROW_HOME}" ARROW_HOME) +endif() +arrow_find_package(ARROW + "${ARROW_HOME}" + arrow + arrow/api.h + Arrow + arrow) + +if(ARROW_HOME) + if(ARROW_INCLUDE_DIR) + file(READ "${ARROW_INCLUDE_DIR}/arrow/util/config.h" ARROW_CONFIG_H_CONTENT) + arrow_extract_macro_value(ARROW_VERSION_MAJOR "ARROW_VERSION_MAJOR" + "${ARROW_CONFIG_H_CONTENT}") + arrow_extract_macro_value(ARROW_VERSION_MINOR "ARROW_VERSION_MINOR" + "${ARROW_CONFIG_H_CONTENT}") + arrow_extract_macro_value(ARROW_VERSION_PATCH "ARROW_VERSION_PATCH" + "${ARROW_CONFIG_H_CONTENT}") + if("${ARROW_VERSION_MAJOR}" STREQUAL "" + OR 
"${ARROW_VERSION_MINOR}" STREQUAL "" + OR "${ARROW_VERSION_PATCH}" STREQUAL "") + set(ARROW_VERSION "0.0.0") + else() + set(ARROW_VERSION + "${ARROW_VERSION_MAJOR}.${ARROW_VERSION_MINOR}.${ARROW_VERSION_PATCH}") + endif() + + arrow_extract_macro_value(ARROW_SO_VERSION_QUOTED "ARROW_SO_VERSION" + "${ARROW_CONFIG_H_CONTENT}") + string(REGEX REPLACE "^\"(.+)\"$" "\\1" ARROW_SO_VERSION "${ARROW_SO_VERSION_QUOTED}") + arrow_extract_macro_value(ARROW_FULL_SO_VERSION_QUOTED "ARROW_FULL_SO_VERSION" + "${ARROW_CONFIG_H_CONTENT}") + string(REGEX + REPLACE "^\"(.+)\"$" "\\1" ARROW_FULL_SO_VERSION + "${ARROW_FULL_SO_VERSION_QUOTED}") + endif() +else() + if(ARROW_USE_CMAKE_PACKAGE_CONFIG) + find_package(Arrow CONFIG) + elseif(ARROW_USE_PKG_CONFIG) + pkg_get_variable(ARROW_SO_VERSION arrow so_version) + pkg_get_variable(ARROW_FULL_SO_VERSION arrow full_so_version) + endif() +endif() + +set(ARROW_ABI_VERSION ${ARROW_SO_VERSION}) + +mark_as_advanced(ARROW_ABI_VERSION + ARROW_CONFIG_SUFFIXES + ARROW_FULL_SO_VERSION + ARROW_IMPORT_LIB + ARROW_INCLUDE_DIR + ARROW_LIBS + ARROW_LIB_DIR + ARROW_SEARCH_LIB_PATH_SUFFIXES + ARROW_SHARED_IMP_LIB + ARROW_SHARED_LIB + ARROW_SO_VERSION + ARROW_STATIC_LIB + ARROW_VERSION + ARROW_VERSION_MAJOR + ARROW_VERSION_MINOR + ARROW_VERSION_PATCH) + +find_package_handle_standard_args(Arrow REQUIRED_VARS + # The first required variable is shown + # in the found message. So this list is + # not sorted alphabetically. + ARROW_INCLUDE_DIR + ARROW_LIB_DIR + ARROW_FULL_SO_VERSION + ARROW_SO_VERSION + VERSION_VAR + ARROW_VERSION) +set(ARROW_FOUND ${Arrow_FOUND}) + +if(Arrow_FOUND AND NOT Arrow_FIND_QUIETLY) + message(STATUS "Arrow version: ${ARROW_VERSION} (${ARROW_FIND_APPROACH})") + message(STATUS "Arrow SO and ABI version: ${ARROW_SO_VERSION}") + message(STATUS "Arrow full SO version: ${ARROW_FULL_SO_VERSION}") + message(STATUS "Found the Arrow core shared library: ${ARROW_SHARED_LIB}") + message(STATUS "Found the Arrow core import library: ${ARROW_IMPORT_LIB}") + message(STATUS "Found the Arrow core static library: ${ARROW_STATIC_LIB}") +endif() diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 625fc0d6e2..ac3eb672e2 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -50,9 +50,14 @@ endmacro() macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") - #message(STATUS "Finding ${DEPENDENCY_NAME} package") - #message(STATUS "${DEPENDENCY_NAME} package not found") - build_dependency(${DEPENDENCY_NAME}) + if (${DEPENDENCY_NAME} STREQUAL "ARROW") + find_package(${DEPENDENCY_NAME} MODULE) + if (NOT ${${DEPENDENCY_NAME}_FOUND}) + build_dependency(${DEPENDENCY_NAME}) + endif () + else() + build_dependency(${DEPENDENCY_NAME}) + endif() elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED") build_dependency(${DEPENDENCY_NAME}) elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM") @@ -131,17 +136,6 @@ if (USE_JFROG_CACHE STREQUAL "ON") endif () endif () -macro(resolve_dependency DEPENDENCY_NAME) - if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") - #disable find_package for now - build_dependency(${DEPENDENCY_NAME}) - elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED") - build_dependency(${DEPENDENCY_NAME}) - elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM") - find_package(${DEPENDENCY_NAME} REQUIRED) - endif () -endmacro() - # ---------------------------------------------------------------------- # ExternalProject options 
@@ -412,7 +406,7 @@ if (KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep) resolve_dependency(ARROW) - link_directories(SYSTEM ${ARROW_PREFIX}/lib/) + link_directories(SYSTEM ${ARROW_LIBRARY_DIRS}) include_directories(SYSTEM ${ARROW_INCLUDE_DIR}) endif () From 8ba423db030d47ea4f5d25925dffc18cdc366ccd Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 16:55:02 +0800 Subject: [PATCH 152/196] update Index ThirdPartyPackagesCore.cmake --- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index ac3eb672e2..e23d101128 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -51,7 +51,7 @@ endmacro() macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") if (${DEPENDENCY_NAME} STREQUAL "ARROW") - find_package(${DEPENDENCY_NAME} MODULE) + find_package(Arrow MODULE) if (NOT ${${DEPENDENCY_NAME}_FOUND}) build_dependency(${DEPENDENCY_NAME}) endif () From e29cf5a0fe158394ebfb6376ed0a6fb9b98e90f0 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 17:00:29 +0800 Subject: [PATCH 153/196] Install libarrow-dev on System --- ci/scripts/build.sh | 1 + ci/travis/install_dependency.sh | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 253ee5893d..118d6a4785 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -89,6 +89,7 @@ CMAKE_CMD="cmake \ -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ +-DARROW_SOURCE=AUTO \ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} ${CMAKE_CMD} diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index 54e2f41ab5..627da9e38a 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -9,6 +9,13 @@ sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB echo "deb https://apt.repos.intel.com/mkl all main" | \ sudo tee /etc/apt/sources.list.d/intel-mkl.list +sudo wget -O /usr/share/keyrings/apache-arrow-keyring.gpg https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-keyring.gpg + +sudo tee /etc/apt/sources.list.d/apache-arrow.list < Date: Mon, 11 Nov 2019 17:26:05 +0800 Subject: [PATCH 154/196] fix link arrow library path --- core/src/index/knowhere/CMakeLists.txt | 2 +- core/src/index/unittest/CMakeLists.txt | 2 +- core/src/index/unittest/faiss_benchmark/CMakeLists.txt | 2 +- core/src/index/unittest/faiss_ori/CMakeLists.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/src/index/knowhere/CMakeLists.txt b/core/src/index/knowhere/CMakeLists.txt index 11c79e5466..373965acf0 100644 --- a/core/src/index/knowhere/CMakeLists.txt +++ b/core/src/index/knowhere/CMakeLists.txt @@ -49,7 +49,7 @@ set(depend_libs SPTAGLibStatic faiss arrow - ${ARROW_PREFIX}/lib/libjemalloc_pic.a + ${ARROW_LIB_DIR}/libjemalloc_pic.a gomp gfortran pthread diff --git a/core/src/index/unittest/CMakeLists.txt b/core/src/index/unittest/CMakeLists.txt index 145278a636..13277aa40e 100644 --- a/core/src/index/unittest/CMakeLists.txt +++ b/core/src/index/unittest/CMakeLists.txt @@ -6,7 +6,7 @@ include_directories(${INDEX_SOURCE_DIR}) set(depend_libs gtest gmock gtest_main gmock_main faiss - arrow "${ARROW_PREFIX}/lib/libjemalloc_pic.a" + arrow "${ARROW_LIB_DIR}/libjemalloc_pic.a" ) if 
(BUILD_FAISS_WITH_MKL) set(depend_libs ${depend_libs} diff --git a/core/src/index/unittest/faiss_benchmark/CMakeLists.txt b/core/src/index/unittest/faiss_benchmark/CMakeLists.txt index 72eb7e7a7e..6cad5abda5 100644 --- a/core/src/index/unittest/faiss_benchmark/CMakeLists.txt +++ b/core/src/index/unittest/faiss_benchmark/CMakeLists.txt @@ -13,7 +13,7 @@ if (KNOWHERE_GPU_VERSION) set(depend_libs faiss hdf5 - arrow ${ARROW_PREFIX}/lib/libjemalloc_pic.a + arrow ${ARROW_LIB_DIR}/libjemalloc_pic.a ) if (BUILD_FAISS_WITH_MKL) set(depend_libs ${depend_libs} diff --git a/core/src/index/unittest/faiss_ori/CMakeLists.txt b/core/src/index/unittest/faiss_ori/CMakeLists.txt index 8216764ab7..829a27fd87 100644 --- a/core/src/index/unittest/faiss_ori/CMakeLists.txt +++ b/core/src/index/unittest/faiss_ori/CMakeLists.txt @@ -8,7 +8,7 @@ if (KNOWHERE_GPU_VERSION) set(depend_libs faiss - arrow ${ARROW_PREFIX}/lib/libjemalloc_pic.a + arrow ${ARROW_LIB_DIR}/libjemalloc_pic.a ) if (BUILD_FAISS_WITH_MKL) set(depend_libs ${depend_libs} From 77dd0ad9af7051be82ee83b6fdf39eaef5e97c9f Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 17:39:01 +0800 Subject: [PATCH 155/196] update ci/travis/install_dependency.sh --- ci/travis/install_dependency.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index 627da9e38a..a2424727f8 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -28,6 +28,7 @@ sudo apt-get install -y -q --no-install-recommends \ ccache \ pkg-config \ libarrow-dev \ + libjemalloc-dev \ libboost-filesystem-dev \ libboost-system-dev \ libboost-regex-dev \ From 439456f8cddb952825c8736c6daec57e3918680e Mon Sep 17 00:00:00 2001 From: quicksilver Date: Mon, 11 Nov 2019 19:48:29 +0800 Subject: [PATCH 156/196] add build with mkl option in build.sh --- ci/scripts/build.sh | 10 ++++++++-- ci/travis/travis_build.sh | 4 ++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 118d6a4785..d077325fa8 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -19,9 +19,10 @@ BUILD_COVERAGE="OFF" USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" CPU_VERSION="ON" +WITH_MKL="OFF" CUDA_COMPILER=/usr/local/cuda/bin/nvcc -while getopts "o:t:b:gulcjh" arg +while getopts "o:t:b:gulcjmh" arg do case $arg in o) @@ -49,6 +50,9 @@ do j) USE_JFROG_CACHE="ON" ;; + m) + WITH_MKL="ON" + ;; h) # help echo " @@ -61,10 +65,11 @@ parameter: -l: run cpplint, clang-format and clang-tidy(default: OFF) -c: code coverage(default: OFF) -j: use jfrog cache build directory(default: OFF) +-m: build with MKL(default: OFF) -h: help usage: -./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} [-u] [-l] [-c] [-j] [-h] +./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} [-u] [-l] [-c] [-j] [-m] [-h] " exit 0 ;; @@ -89,6 +94,7 @@ CMAKE_CMD="cmake \ -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ +-DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ -DARROW_SOURCE=AUTO \ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} diff --git a/ci/travis/travis_build.sh b/ci/travis/travis_build.sh index 3cde1d5a4d..3f49750b6d 100755 --- a/ci/travis/travis_build.sh +++ b/ci/travis/travis_build.sh @@ -18,7 +18,7 @@ done BUILD_COMMON_FLAGS="-t ${MILVUS_BUILD_TYPE} -o ${MILVUS_INSTALL_PREFIX} -b ${MILVUS_BUILD_DIR}" if [ $only_library_mode == "yes" ]; then - ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} + 
${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -m else - ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -u -c + ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -m -u -c fi \ No newline at end of file From 89f094f7cb00000f46d0ef6ce1df55e5479badb1 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Mon, 11 Nov 2019 21:35:45 +0800 Subject: [PATCH 157/196] fix mkl library path error --- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index e23d101128..8840db7621 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -716,6 +716,7 @@ macro(build_faiss) find_path(MKL_LIB_PATH NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a" + PATHS /opt PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/") if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND") message(FATAL_ERROR "Could not find MKL libraries") From af68a2f4d3b7e98ed45353b057d208704a16f1bf Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Mon, 11 Nov 2019 21:58:22 +0800 Subject: [PATCH 158/196] fix mkl library path error --- ci/travis/install_dependency.sh | 6 +++--- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 3 +-- core/src/index/thirdparty/versions.txt | 3 ++- core/thirdparty/versions.txt | 1 - 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index a2424727f8..6120fccda9 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -32,8 +32,8 @@ sudo apt-get install -y -q --no-install-recommends \ libboost-filesystem-dev \ libboost-system-dev \ libboost-regex-dev \ - intel-mkl-gnu-2019.4-243 \ - intel-mkl-core-2019.4-243 \ + intel-mkl-gnu-2019.5-281 \ + intel-mkl-core-2019.5-281 \ libmysqlclient-dev \ clang-format-6.0 \ clang-tidy-6.0 \ @@ -42,4 +42,4 @@ sudo apt-get install -y -q --no-install-recommends \ sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \ /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so -export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.4.243/linux/mkl/lib/intel64 +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64 diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 8840db7621..3173a37d34 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -714,9 +714,8 @@ macro(build_faiss) if (BUILD_FAISS_WITH_MKL) - find_path(MKL_LIB_PATH + find_library(MKL_LIB_PATH NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a" - PATHS /opt PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/") if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND") message(FATAL_ERROR "Could not find MKL libraries") diff --git a/core/src/index/thirdparty/versions.txt b/core/src/index/thirdparty/versions.txt index a2b16414c2..f328ec437a 100644 --- a/core/src/index/thirdparty/versions.txt +++ b/core/src/index/thirdparty/versions.txt @@ -3,4 +3,5 @@ BOOST_VERSION=1.70.0 GTEST_VERSION=1.8.1 LAPACK_VERSION=v3.8.0 OPENBLAS_VERSION=v0.3.6 -FAISS_VERSION=branch-0.3.0 \ No newline at end of file +FAISS_VERSION=branch-0.3.0 +MKL_VERSION=2019.5.281 diff --git 
a/core/thirdparty/versions.txt b/core/thirdparty/versions.txt index 68023d4072..4faaf119e4 100644 --- a/core/thirdparty/versions.txt +++ b/core/thirdparty/versions.txt @@ -9,6 +9,5 @@ LIBUNWIND_VERSION=1.3.1 GPERFTOOLS_VERSION=2.7 GRPC_VERSION=master ZLIB_VERSION=v1.2.11 -MKL_VERSION=2019.5.281 # vim: set filetype=sh: From ce0d32fb53f8bfb0391fa682d31b73459c68842d Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Mon, 11 Nov 2019 22:38:41 +0800 Subject: [PATCH 159/196] fix mkl library path error --- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 3173a37d34..e23d101128 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -714,7 +714,7 @@ macro(build_faiss) if (BUILD_FAISS_WITH_MKL) - find_library(MKL_LIB_PATH + find_path(MKL_LIB_PATH NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a" PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/") if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND") message(FATAL_ERROR "Could not find MKL libraries") From c0a728d244983c82aeb9fe9e71792c19f687045b Mon Sep 17 00:00:00 2001 From: groot Date: Tue, 12 Nov 2019 10:29:37 +0800 Subject: [PATCH 160/196] #284 Change C++ SDK to shared library --- CHANGELOG.md | 1 + core/src/sdk/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 906799bcfe..9359be28a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Please mark all change in change log and use the ticket from JIRA. - \#12 - Pure CPU version for Milvus - \#77 - Support table partition - \#226 - Experimental shards middleware for Milvus +- \#284 - Change C++ SDK to shared library ## Improvement diff --git a/core/src/sdk/CMakeLists.txt b/core/src/sdk/CMakeLists.txt index c68712d34c..71a03c38d0 100644 --- a/core/src/sdk/CMakeLists.txt +++ b/core/src/sdk/CMakeLists.txt @@ -22,7 +22,7 @@ include_directories(include) aux_source_directory(interface interface_files) aux_source_directory(grpc grpc_client_files) -add_library(milvus_sdk STATIC +add_library(milvus_sdk SHARED ${interface_files} ${grpc_client_files} ${grpc_service_files} From eb4b7c77f399236716f436b619d41bbf712fb574 Mon Sep 17 00:00:00 2001 From: groot Date: Tue, 12 Nov 2019 10:47:09 +0800 Subject: [PATCH 161/196] #284 Change C++ SDK to shared library --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9359be28a2..a951cb041f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,9 +11,9 @@ Please mark all change in change log and use the ticket from JIRA.
- \#12 - Pure CPU version for Milvus - \#77 - Support table partition - \#226 - Experimental shards middleware for Milvus -- \#284 - Change C++ SDK to shared library ## Improvement +- \#284 - Change C++ SDK to shared library ## Task From f8e66f12a9f4246ee59b2a6e273d8f116760f83d Mon Sep 17 00:00:00 2001 From: quicksilver Date: Tue, 12 Nov 2019 11:12:03 +0800 Subject: [PATCH 162/196] add FindGTest.cmake --- ci/scripts/build.sh | 3 +- ci/travis/install_dependency.sh | 1 + core/cmake/DefineOptions.cmake | 2 +- core/cmake/FindGTest.cmake | 278 ++++++++++++++++++ core/cmake/ThirdPartyPackages.cmake | 6 +- core/src/index/cmake/DefineOptionsCore.cmake | 2 +- core/src/index/cmake/FindGTest.cmake | 278 ++++++++++++++++++ .../index/cmake/ThirdPartyPackagesCore.cmake | 14 +- 8 files changed, 570 insertions(+), 14 deletions(-) create mode 100644 core/cmake/FindGTest.cmake create mode 100644 core/src/index/cmake/FindGTest.cmake diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index d077325fa8..0875ac1dd1 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -95,7 +95,8 @@ CMAKE_CMD="cmake \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ -DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ --DARROW_SOURCE=AUTO \ +-DArrow_SOURCE=AUTO \ +-DGTest_SOURCE=AUTO \ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} ${CMAKE_CMD} diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index 6120fccda9..694d78b8f7 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -27,6 +27,7 @@ sudo apt-get install -y -q --no-install-recommends \ automake \ ccache \ pkg-config \ + libgtest-dev \ libarrow-dev \ libjemalloc-dev \ libboost-filesystem-dev \ diff --git a/core/cmake/DefineOptions.cmake b/core/cmake/DefineOptions.cmake index 0050de75eb..4f4c8b17b5 100644 --- a/core/cmake/DefineOptions.cmake +++ b/core/cmake/DefineOptions.cmake @@ -48,7 +48,7 @@ define_option(MILVUS_GPU_VERSION "Build GPU version" OFF) #---------------------------------------------------------------------- set_option_category("Thirdparty") -set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO") +set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "BUNDLED") define_option_string(MILVUS_DEPENDENCY_SOURCE "Method to use for acquiring MILVUS's build dependencies" diff --git a/core/cmake/FindGTest.cmake b/core/cmake/FindGTest.cmake new file mode 100644 index 0000000000..d746e40b05 --- /dev/null +++ b/core/cmake/FindGTest.cmake @@ -0,0 +1,278 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Originally imported from the CMake project at commit +# df4ed1e9ffcdb6b99ccff9e6f44808fdd2abda56 with the following license header: +# +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing for details. + + +#[=======================================================================[.rst: +FindGTest +--------- + +Locate the Google C++ Testing Framework.
+ +Imported targets +^^^^^^^^^^^^^^^^ + +This module defines the following :prop_tgt:`IMPORTED` targets: + +``GTest::GTest`` + The Google Test ``gtest`` library, if found; adds Thread::Thread + automatically +``GTest::Main`` + The Google Test ``gtest_main`` library, if found +``GMock::GMock`` + The Google Mock ``gmock`` library, if found + + +Result variables +^^^^^^^^^^^^^^^^ + +This module will set the following variables in your project: + +``GTEST_FOUND`` + Found the Google Testing framework +``GTEST_INCLUDE_DIRS`` + the directory containing the Google Test headers + +The library variables below are set as normal variables. These +contain debug/optimized keywords when a debugging library is found. + +``GTEST_LIBRARIES`` + The Google Test ``gtest`` library; note it also requires linking + with an appropriate thread library +``GTEST_MAIN_LIBRARIES`` + The Google Test ``gtest_main`` library +``GTEST_BOTH_LIBRARIES`` + Both ``gtest`` and ``gtest_main`` + +Cache variables +^^^^^^^^^^^^^^^ + +The following cache variables may also be set: + +``GTEST_ROOT`` + The root directory of the Google Test installation (may also be + set as an environment variable) +``GTEST_MSVC_SEARCH`` + If compiling with MSVC, this variable can be set to ``MT`` or + ``MD`` (the default) to enable searching a GTest build tree + + +Example usage +^^^^^^^^^^^^^ + +:: + + enable_testing() + find_package(GTest REQUIRED) + + add_executable(foo foo.cc) + target_link_libraries(foo GTest::GTest GTest::Main) + + add_test(AllTestsInFoo foo) + + +Deeper integration with CTest +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +See :module:`GoogleTest` for information on the :command:`gtest_add_tests` +and :command:`gtest_discover_tests` commands. +#]=======================================================================] + +# include(${CMAKE_CURRENT_LIST_DIR}/GoogleTest.cmake) + +function(__gtest_append_debugs _endvar _library) + if(${_library} AND ${_library}_DEBUG) + set(_output optimized ${${_library}} debug ${${_library}_DEBUG}) + else() + set(_output ${${_library}}) + endif() + set(${_endvar} ${_output} PARENT_SCOPE) +endfunction() + +function(__gtest_find_library _name) + find_library(${_name} + NAMES ${ARGN} + HINTS + ENV GTEST_ROOT + ${GTEST_ROOT} + PATH_SUFFIXES ${_gtest_libpath_suffixes} + ) + mark_as_advanced(${_name}) +endfunction() + +macro(__gtest_determine_windows_library_type _var) + if(EXISTS "${${_var}}") + file(TO_NATIVE_PATH "${${_var}}" _lib_path) + get_filename_component(_name "${${_var}}" NAME_WE) + file(STRINGS "${${_var}}" _match REGEX "${_name}\\.dll" LIMIT_COUNT 1) + if(NOT _match STREQUAL "") + set(${_var}_TYPE SHARED PARENT_SCOPE) + else() + set(${_var}_TYPE UNKNOWN PARENT_SCOPE) + endif() + return() + endif() +endmacro() + +function(__gtest_determine_library_type _var) + if(WIN32) + # For now, at least, only Windows really needs to know the library type + __gtest_determine_windows_library_type(${_var}) + __gtest_determine_windows_library_type(${_var}_RELEASE) + __gtest_determine_windows_library_type(${_var}_DEBUG) + endif() + # If we get here, no determination was made from the above checks + set(${_var}_TYPE UNKNOWN PARENT_SCOPE) +endfunction() + +function(__gtest_import_library _target _var _config) + if(_config) + set(_config_suffix "_${_config}") + else() + set(_config_suffix "") + endif() + + set(_lib "${${_var}${_config_suffix}}") + if(EXISTS "${_lib}") + if(_config) + set_property(TARGET ${_target} APPEND PROPERTY + IMPORTED_CONFIGURATIONS ${_config}) + endif() + set_target_properties(${_target} PROPERTIES + 
IMPORTED_LINK_INTERFACE_LANGUAGES${_config_suffix} "CXX") + if(WIN32 AND ${_var}_TYPE STREQUAL SHARED) + set_target_properties(${_target} PROPERTIES + IMPORTED_IMPLIB${_config_suffix} "${_lib}") + else() + set_target_properties(${_target} PROPERTIES + IMPORTED_LOCATION${_config_suffix} "${_lib}") + endif() + endif() +endfunction() + +# + +if(NOT DEFINED GTEST_MSVC_SEARCH) + set(GTEST_MSVC_SEARCH MD) +endif() + +set(_gtest_libpath_suffixes lib) +if(MSVC) + if(GTEST_MSVC_SEARCH STREQUAL "MD") + list(APPEND _gtest_libpath_suffixes + msvc/gtest-md/Debug + msvc/gtest-md/Release + msvc/x64/Debug + msvc/x64/Release + ) + elseif(GTEST_MSVC_SEARCH STREQUAL "MT") + list(APPEND _gtest_libpath_suffixes + msvc/gtest/Debug + msvc/gtest/Release + msvc/x64/Debug + msvc/x64/Release + ) + endif() +endif() + + +find_path(GTEST_INCLUDE_DIR gtest/gtest.h + HINTS + $ENV{GTEST_ROOT}/include + ${GTEST_ROOT}/include + PATH_SUFFIXES ${LIB_PATH_SUFFIXES} +) +mark_as_advanced(GTEST_INCLUDE_DIR) + +if(MSVC AND GTEST_MSVC_SEARCH STREQUAL "MD") + # The provided /MD project files for Google Test add -md suffixes to the + # library names. + __gtest_find_library(GTEST_LIBRARY gtest-md gtest) + __gtest_find_library(GTEST_LIBRARY_DEBUG gtest-mdd gtestd) + __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main-md gtest_main) + __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_main-mdd gtest_maind) + __gtest_find_library(GMOCK_LIBRARY gmock-md gmock) + __gtest_find_library(GMOCK_LIBRARY_DEBUG gmock-mdd gmockd) +else() + __gtest_find_library(GTEST_LIBRARY gtest) + __gtest_find_library(GTEST_LIBRARY_DEBUG gtestd) + __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main) + __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_maind) + __gtest_find_library(GMOCK_LIBRARY gmock) + __gtest_find_library(GMOCK_LIBRARY_DEBUG gtestd) +endif() + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(GTest DEFAULT_MSG GTEST_LIBRARY GTEST_INCLUDE_DIR GTEST_MAIN_LIBRARY GMOCK_LIBRARY) + +if(GTEST_FOUND) + set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIR}) + __gtest_append_debugs(GTEST_LIBRARIES GTEST_LIBRARY) + __gtest_append_debugs(GTEST_MAIN_LIBRARIES GTEST_MAIN_LIBRARY) + __gtest_append_debugs(GMOCK_LIBRARIES GMOCK_LIBRARY) + set(GTEST_BOTH_LIBRARIES ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES}) + + find_package(Threads QUIET) + + if(NOT TARGET GTest::GTest) + __gtest_determine_library_type(GTEST_LIBRARY) + add_library(GTest::GTest ${GTEST_LIBRARY_TYPE} IMPORTED) + if(TARGET Threads::Threads) + set_target_properties(GTest::GTest PROPERTIES + INTERFACE_LINK_LIBRARIES Threads::Threads) + endif() + if(GTEST_LIBRARY_TYPE STREQUAL "SHARED") + set_target_properties(GTest::GTest PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + endif() + if(GTEST_INCLUDE_DIRS) + set_target_properties(GTest::GTest PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") + endif() + __gtest_import_library(GTest::GTest GTEST_LIBRARY "") + __gtest_import_library(GTest::GTest GTEST_LIBRARY "RELEASE") + __gtest_import_library(GTest::GTest GTEST_LIBRARY "DEBUG") + endif() + if(NOT TARGET GTest::Main) + __gtest_determine_library_type(GTEST_MAIN_LIBRARY) + add_library(GTest::Main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) + set_target_properties(GTest::Main PROPERTIES + INTERFACE_LINK_LIBRARIES "GTest::GTest") + __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "") + __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "RELEASE") + __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "DEBUG") + endif() + if(NOT TARGET 
GTest::GMock) + __gtest_determine_library_type(GMOCK_LIBRARY) + add_library(GTest::GMock ${GMOCK_LIBRARY_TYPE} IMPORTED) + if(TARGET Threads::Threads) + set_target_properties(GTest::GMock PROPERTIES + INTERFACE_LINK_LIBRARIES Threads::Threads) + endif() + if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED") + set_target_properties(GTest::GMock PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + endif() + if(GTEST_INCLUDE_DIRS) + set_target_properties(GTest::GMock PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") + endif() + __gtest_import_library(GTest::GMock GMOCK_LIBRARY "") + __gtest_import_library(GTest::GMock GMOCK_LIBRARY "RELEASE") + __gtest_import_library(GTest::GMock GMOCK_LIBRARY "DEBUG") + endif() +endif() diff --git a/core/cmake/ThirdPartyPackages.cmake b/core/cmake/ThirdPartyPackages.cmake index 657efa2eef..2092879b17 100644 --- a/core/cmake/ThirdPartyPackages.cmake +++ b/core/cmake/ThirdPartyPackages.cmake @@ -164,8 +164,10 @@ endif () macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") - #disable find_package for now - build_dependency(${DEPENDENCY_NAME}) + find_package(${DEPENDENCY_NAME} MODULE) + if(NOT ${${DEPENDENCY_NAME}_FOUND}) + build_dependency(${DEPENDENCY_NAME}) + endif() elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED") build_dependency(${DEPENDENCY_NAME}) elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM") diff --git a/core/src/index/cmake/DefineOptionsCore.cmake b/core/src/index/cmake/DefineOptionsCore.cmake index e49b3a779a..2f050cdf4e 100644 --- a/core/src/index/cmake/DefineOptionsCore.cmake +++ b/core/src/index/cmake/DefineOptionsCore.cmake @@ -52,7 +52,7 @@ endif () #---------------------------------------------------------------------- set_option_category("Thirdparty") -set(KNOWHERE_DEPENDENCY_SOURCE_DEFAULT "AUTO") +set(KNOWHERE_DEPENDENCY_SOURCE_DEFAULT "BUNDLED") define_option_string(KNOWHERE_DEPENDENCY_SOURCE "Method to use for acquiring KNOWHERE's build dependencies" diff --git a/core/src/index/cmake/FindGTest.cmake b/core/src/index/cmake/FindGTest.cmake new file mode 100644 index 0000000000..d746e40b05 --- /dev/null +++ b/core/src/index/cmake/FindGTest.cmake @@ -0,0 +1,278 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Originally imported from the CMake project at commit +# df4ed1e9ffcdb6b99ccff9e6f44808fdd2abda56 with the following license header: +# +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing for details. + + +#[=======================================================================[.rst: +FindGTest +--------- + +Locate the Google C++ Testing Framework. 
+ +Imported targets +^^^^^^^^^^^^^^^^ + +This module defines the following :prop_tgt:`IMPORTED` targets: + +``GTest::GTest`` + The Google Test ``gtest`` library, if found; adds Thread::Thread + automatically +``GTest::Main`` + The Google Test ``gtest_main`` library, if found +``GMock::GMock`` + The Google Mock ``gmock`` library, if found + + +Result variables +^^^^^^^^^^^^^^^^ + +This module will set the following variables in your project: + +``GTEST_FOUND`` + Found the Google Testing framework +``GTEST_INCLUDE_DIRS`` + the directory containing the Google Test headers + +The library variables below are set as normal variables. These +contain debug/optimized keywords when a debugging library is found. + +``GTEST_LIBRARIES`` + The Google Test ``gtest`` library; note it also requires linking + with an appropriate thread library +``GTEST_MAIN_LIBRARIES`` + The Google Test ``gtest_main`` library +``GTEST_BOTH_LIBRARIES`` + Both ``gtest`` and ``gtest_main`` + +Cache variables +^^^^^^^^^^^^^^^ + +The following cache variables may also be set: + +``GTEST_ROOT`` + The root directory of the Google Test installation (may also be + set as an environment variable) +``GTEST_MSVC_SEARCH`` + If compiling with MSVC, this variable can be set to ``MT`` or + ``MD`` (the default) to enable searching a GTest build tree + + +Example usage +^^^^^^^^^^^^^ + +:: + + enable_testing() + find_package(GTest REQUIRED) + + add_executable(foo foo.cc) + target_link_libraries(foo GTest::GTest GTest::Main) + + add_test(AllTestsInFoo foo) + + +Deeper integration with CTest +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +See :module:`GoogleTest` for information on the :command:`gtest_add_tests` +and :command:`gtest_discover_tests` commands. +#]=======================================================================] + +# include(${CMAKE_CURRENT_LIST_DIR}/GoogleTest.cmake) + +function(__gtest_append_debugs _endvar _library) + if(${_library} AND ${_library}_DEBUG) + set(_output optimized ${${_library}} debug ${${_library}_DEBUG}) + else() + set(_output ${${_library}}) + endif() + set(${_endvar} ${_output} PARENT_SCOPE) +endfunction() + +function(__gtest_find_library _name) + find_library(${_name} + NAMES ${ARGN} + HINTS + ENV GTEST_ROOT + ${GTEST_ROOT} + PATH_SUFFIXES ${_gtest_libpath_suffixes} + ) + mark_as_advanced(${_name}) +endfunction() + +macro(__gtest_determine_windows_library_type _var) + if(EXISTS "${${_var}}") + file(TO_NATIVE_PATH "${${_var}}" _lib_path) + get_filename_component(_name "${${_var}}" NAME_WE) + file(STRINGS "${${_var}}" _match REGEX "${_name}\\.dll" LIMIT_COUNT 1) + if(NOT _match STREQUAL "") + set(${_var}_TYPE SHARED PARENT_SCOPE) + else() + set(${_var}_TYPE UNKNOWN PARENT_SCOPE) + endif() + return() + endif() +endmacro() + +function(__gtest_determine_library_type _var) + if(WIN32) + # For now, at least, only Windows really needs to know the library type + __gtest_determine_windows_library_type(${_var}) + __gtest_determine_windows_library_type(${_var}_RELEASE) + __gtest_determine_windows_library_type(${_var}_DEBUG) + endif() + # If we get here, no determination was made from the above checks + set(${_var}_TYPE UNKNOWN PARENT_SCOPE) +endfunction() + +function(__gtest_import_library _target _var _config) + if(_config) + set(_config_suffix "_${_config}") + else() + set(_config_suffix "") + endif() + + set(_lib "${${_var}${_config_suffix}}") + if(EXISTS "${_lib}") + if(_config) + set_property(TARGET ${_target} APPEND PROPERTY + IMPORTED_CONFIGURATIONS ${_config}) + endif() + set_target_properties(${_target} PROPERTIES + 
IMPORTED_LINK_INTERFACE_LANGUAGES${_config_suffix} "CXX") + if(WIN32 AND ${_var}_TYPE STREQUAL SHARED) + set_target_properties(${_target} PROPERTIES + IMPORTED_IMPLIB${_config_suffix} "${_lib}") + else() + set_target_properties(${_target} PROPERTIES + IMPORTED_LOCATION${_config_suffix} "${_lib}") + endif() + endif() +endfunction() + +# + +if(NOT DEFINED GTEST_MSVC_SEARCH) + set(GTEST_MSVC_SEARCH MD) +endif() + +set(_gtest_libpath_suffixes lib) +if(MSVC) + if(GTEST_MSVC_SEARCH STREQUAL "MD") + list(APPEND _gtest_libpath_suffixes + msvc/gtest-md/Debug + msvc/gtest-md/Release + msvc/x64/Debug + msvc/x64/Release + ) + elseif(GTEST_MSVC_SEARCH STREQUAL "MT") + list(APPEND _gtest_libpath_suffixes + msvc/gtest/Debug + msvc/gtest/Release + msvc/x64/Debug + msvc/x64/Release + ) + endif() +endif() + + +find_path(GTEST_INCLUDE_DIR gtest/gtest.h + HINTS + $ENV{GTEST_ROOT}/include + ${GTEST_ROOT}/include + PATH_SUFFIXES ${LIB_PATH_SUFFIXES} +) +mark_as_advanced(GTEST_INCLUDE_DIR) + +if(MSVC AND GTEST_MSVC_SEARCH STREQUAL "MD") + # The provided /MD project files for Google Test add -md suffixes to the + # library names. + __gtest_find_library(GTEST_LIBRARY gtest-md gtest) + __gtest_find_library(GTEST_LIBRARY_DEBUG gtest-mdd gtestd) + __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main-md gtest_main) + __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_main-mdd gtest_maind) + __gtest_find_library(GMOCK_LIBRARY gmock-md gmock) + __gtest_find_library(GMOCK_LIBRARY_DEBUG gmock-mdd gmockd) +else() + __gtest_find_library(GTEST_LIBRARY gtest) + __gtest_find_library(GTEST_LIBRARY_DEBUG gtestd) + __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main) + __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_maind) + __gtest_find_library(GMOCK_LIBRARY gmock) + __gtest_find_library(GMOCK_LIBRARY_DEBUG gtestd) +endif() + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(GTest DEFAULT_MSG GTEST_LIBRARY GTEST_INCLUDE_DIR GTEST_MAIN_LIBRARY GMOCK_LIBRARY) + +if(GTEST_FOUND) + set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIR}) + __gtest_append_debugs(GTEST_LIBRARIES GTEST_LIBRARY) + __gtest_append_debugs(GTEST_MAIN_LIBRARIES GTEST_MAIN_LIBRARY) + __gtest_append_debugs(GMOCK_LIBRARIES GMOCK_LIBRARY) + set(GTEST_BOTH_LIBRARIES ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES}) + + find_package(Threads QUIET) + + if(NOT TARGET GTest::GTest) + __gtest_determine_library_type(GTEST_LIBRARY) + add_library(GTest::GTest ${GTEST_LIBRARY_TYPE} IMPORTED) + if(TARGET Threads::Threads) + set_target_properties(GTest::GTest PROPERTIES + INTERFACE_LINK_LIBRARIES Threads::Threads) + endif() + if(GTEST_LIBRARY_TYPE STREQUAL "SHARED") + set_target_properties(GTest::GTest PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + endif() + if(GTEST_INCLUDE_DIRS) + set_target_properties(GTest::GTest PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") + endif() + __gtest_import_library(GTest::GTest GTEST_LIBRARY "") + __gtest_import_library(GTest::GTest GTEST_LIBRARY "RELEASE") + __gtest_import_library(GTest::GTest GTEST_LIBRARY "DEBUG") + endif() + if(NOT TARGET GTest::Main) + __gtest_determine_library_type(GTEST_MAIN_LIBRARY) + add_library(GTest::Main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) + set_target_properties(GTest::Main PROPERTIES + INTERFACE_LINK_LIBRARIES "GTest::GTest") + __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "") + __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "RELEASE") + __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "DEBUG") + endif() + if(NOT TARGET 
GTest::GMock) + __gtest_determine_library_type(GMOCK_LIBRARY) + add_library(GTest::GMock ${GMOCK_LIBRARY_TYPE} IMPORTED) + if(TARGET Threads::Threads) + set_target_properties(GTest::GMock PROPERTIES + INTERFACE_LINK_LIBRARIES Threads::Threads) + endif() + if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED") + set_target_properties(GTest::GMock PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + endif() + if(GTEST_INCLUDE_DIRS) + set_target_properties(GTest::GMock PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") + endif() + __gtest_import_library(GTest::GMock GMOCK_LIBRARY "") + __gtest_import_library(GTest::GMock GMOCK_LIBRARY "RELEASE") + __gtest_import_library(GTest::GMock GMOCK_LIBRARY "DEBUG") + endif() +endif() diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index e23d101128..65615e885e 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -16,7 +16,7 @@ set(KNOWHERE_THIRDPARTY_DEPENDENCIES - ARROW + Arrow FAISS GTest LAPACK @@ -33,7 +33,7 @@ foreach (DEPENDENCY ${KNOWHERE_THIRDPARTY_DEPENDENCIES}) endforeach () macro(build_dependency DEPENDENCY_NAME) - if ("${DEPENDENCY_NAME}" STREQUAL "ARROW") + if ("${DEPENDENCY_NAME}" STREQUAL "Arrow") build_arrow() elseif ("${DEPENDENCY_NAME}" STREQUAL "LAPACK") build_lapack() @@ -50,13 +50,9 @@ endmacro() macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") - if (${DEPENDENCY_NAME} STREQUAL "ARROW") - find_package(Arrow MODULE) - if (NOT ${${DEPENDENCY_NAME}_FOUND}) - build_dependency(${DEPENDENCY_NAME}) - endif () - else() - build_dependency(${DEPENDENCY_NAME}) + find_package(${DEPENDENCY_NAME} MODULE) + if(NOT ${${DEPENDENCY_NAME}_FOUND}) + build_dependency(${DEPENDENCY_NAME}) endif() elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED") build_dependency(${DEPENDENCY_NAME}) From 583ec7a2b2012c5c8f3fb20dd4712bc9cab5310c Mon Sep 17 00:00:00 2001 From: quicksilver Date: Tue, 12 Nov 2019 11:34:21 +0800 Subject: [PATCH 163/196] format ARROW_LIB_DIR --- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 65615e885e..2fcc7d1001 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -286,6 +286,7 @@ macro(build_arrow) set(ARROW_STATIC_LIB "${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" ) + set(ARROW_LIB_DIR "${ARROW_PREFIX}/lib") set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include") set(ARROW_CMAKE_ARGS @@ -382,7 +383,7 @@ macro(build_arrow) ) endif () - file(MAKE_DIRECTORY "${ARROW_PREFIX}/include") + file(MAKE_DIRECTORY "${ARROW_INCLUDE_DIR}") add_library(arrow STATIC IMPORTED) set_target_properties(arrow PROPERTIES IMPORTED_LOCATION "${ARROW_STATIC_LIB}" @@ -392,8 +393,8 @@ macro(build_arrow) set(JEMALLOC_PREFIX "${INDEX_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep-build/jemalloc_ep-prefix/src/jemalloc_ep") add_custom_command(TARGET arrow_ep POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${ARROW_PREFIX}/lib/ - COMMAND ${CMAKE_COMMAND} -E copy ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a ${ARROW_PREFIX}/lib/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${ARROW_LIB_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a 
${ARROW_LIB_DIR} DEPENDS ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a) endmacro() @@ -402,7 +403,7 @@ if (KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep) resolve_dependency(ARROW) - link_directories(SYSTEM ${ARROW_LIBRARY_DIRS}) + link_directories(SYSTEM ${ARROW_LIB_DIR}) include_directories(SYSTEM ${ARROW_INCLUDE_DIR}) endif () From d6fe3c468fe763dc18d2808b186e010c55ed75e2 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Tue, 12 Nov 2019 11:56:42 +0800 Subject: [PATCH 164/196] fix bug --- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 2fcc7d1001..60ac4b9eac 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -401,7 +401,7 @@ endmacro() if (KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep) - resolve_dependency(ARROW) + resolve_dependency(Arrow) link_directories(SYSTEM ${ARROW_LIB_DIR}) include_directories(SYSTEM ${ARROW_INCLUDE_DIR}) From 1a02484b21717cac956ea43822a9fd944f95ec17 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Tue, 12 Nov 2019 14:45:29 +0800 Subject: [PATCH 165/196] update arrow version to 0.15.1 --- core/src/index/thirdparty/versions.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/index/thirdparty/versions.txt b/core/src/index/thirdparty/versions.txt index f328ec437a..380c9dedad 100644 --- a/core/src/index/thirdparty/versions.txt +++ b/core/src/index/thirdparty/versions.txt @@ -1,4 +1,4 @@ -ARROW_VERSION=apache-arrow-0.14.0 +ARROW_VERSION=apache-arrow-0.15.1 BOOST_VERSION=1.70.0 GTEST_VERSION=1.8.1 LAPACK_VERSION=v3.8.0 From d82826965881cab66f1d75429f90a676d5911454 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Tue, 12 Nov 2019 14:49:49 +0800 Subject: [PATCH 166/196] update travis install_dependency.sh --- ci/travis/install_dependency.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index 694d78b8f7..5b4a5e1482 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -19,8 +19,6 @@ APT_LINE sudo apt-get update -qq sudo apt-get install -y -q --no-install-recommends \ - flex \ - bison \ gfortran \ lsb-core \ libtool \ From bd41630b1f596992920821bf71bc7f7fc43b08a2 Mon Sep 17 00:00:00 2001 From: groot Date: Tue, 12 Nov 2019 15:29:04 +0800 Subject: [PATCH 167/196] fix typo --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a951cb041f..363c2cc385 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ Please mark all change in change log and use the ticket from JIRA. 
- \#226 - Experimental shards middleware for Milvus ## Improvement -- \#284 - Change C++ SDK to shread library +- \#284 - Change C++ SDK to shared library ## Task From 4d6f69927a5b575bf80321092b00f97fd84206f9 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Tue, 12 Nov 2019 15:38:32 +0800 Subject: [PATCH 168/196] update travis install_dependency.sh --- ci/travis/install_dependency.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index 5b4a5e1482..a1bff278a0 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -28,6 +28,7 @@ sudo apt-get install -y -q --no-install-recommends \ libgtest-dev \ libarrow-dev \ libjemalloc-dev \ + libboost_serialization-dev \ libboost-filesystem-dev \ libboost-system-dev \ libboost-regex-dev \ From ff406d0300c23bf6f70b42b4b6ecd723d576c2b6 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Tue, 12 Nov 2019 16:45:27 +0800 Subject: [PATCH 169/196] update travis install_dependency.sh --- ci/travis/install_dependency.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index a1bff278a0..95d7a698db 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -28,7 +28,7 @@ sudo apt-get install -y -q --no-install-recommends \ libgtest-dev \ libarrow-dev \ libjemalloc-dev \ - libboost_serialization-dev \ + libboost-serialization-dev \ libboost-filesystem-dev \ libboost-system-dev \ libboost-regex-dev \ From 636313622c3186b195129d8aeefe42033239d9c7 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Tue, 12 Nov 2019 16:50:22 +0800 Subject: [PATCH 170/196] update travis travis_env_common.sh --- ci/travis/travis_env_common.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/travis/travis_env_common.sh b/ci/travis/travis_env_common.sh index ac63d2950b..f5f9cbbcf7 100644 --- a/ci/travis/travis_env_common.sh +++ b/ci/travis/travis_env_common.sh @@ -3,7 +3,7 @@ export MILVUS_BUILD_DIR=${TRAVIS_BUILD_DIR}/core/cmake_build export MILVUS_INSTALL_PREFIX=/opt/milvus export MILVUS_TRAVIS_COVERAGE=${MILVUS_TRAVIS_COVERAGE:=0} -if ["$MILVUS_TRAVIS_COVERAGE" == "1"]; then +if [ "${MILVUS_TRAVIS_COVERAGE}" == "1" ]; then export MILVUS_CPP_COVERAGE_FILE=${TRAVIS_BUILD_DIR}/output_new.info fi From d04f7b56a2de36fad250a5aa599ce9bd696bc5f6 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Tue, 12 Nov 2019 17:11:32 +0800 Subject: [PATCH 171/196] update FindGTest.cmake --- core/cmake/FindGTest.cmake | 52 ++++++++++++++-------------- core/src/index/cmake/FindGTest.cmake | 52 ++++++++++++++-------------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/core/cmake/FindGTest.cmake b/core/cmake/FindGTest.cmake index d746e40b05..3e14b49b0e 100644 --- a/core/cmake/FindGTest.cmake +++ b/core/cmake/FindGTest.cmake @@ -28,10 +28,10 @@ Imported targets This module defines the following :prop_tgt:`IMPORTED` targets: -``GTest::GTest`` +``gtest`` The Google Test ``gtest`` library, if found; adds Thread::Thread automatically -``GTest::Main`` +``gtest_main`` The Google Test ``gtest_main`` library, if found ``GMock::GMock`` The Google Mock ``gmock`` library, if found @@ -80,7 +80,7 @@ Example usage find_package(GTest REQUIRED) add_executable(foo foo.cc) - target_link_libraries(foo GTest::GTest GTest::Main) + target_link_libraries(foo gtest gtest_main) add_test(AllTestsInFoo foo) @@ -228,51 +228,51 @@ if(GTEST_FOUND) find_package(Threads QUIET) - if(NOT TARGET 
GTest::GTest) + if(NOT TARGET gtest) __gtest_determine_library_type(GTEST_LIBRARY) - add_library(GTest::GTest ${GTEST_LIBRARY_TYPE} IMPORTED) + add_library(gtest ${GTEST_LIBRARY_TYPE} IMPORTED) if(TARGET Threads::Threads) - set_target_properties(GTest::GTest PROPERTIES + set_target_properties(gtest PROPERTIES INTERFACE_LINK_LIBRARIES Threads::Threads) endif() if(GTEST_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(GTest::GTest PROPERTIES + set_target_properties(gtest PROPERTIES INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") endif() if(GTEST_INCLUDE_DIRS) - set_target_properties(GTest::GTest PROPERTIES + set_target_properties(gtest PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") endif() - __gtest_import_library(GTest::GTest GTEST_LIBRARY "") - __gtest_import_library(GTest::GTest GTEST_LIBRARY "RELEASE") - __gtest_import_library(GTest::GTest GTEST_LIBRARY "DEBUG") + __gtest_import_library(gtest GTEST_LIBRARY "") + __gtest_import_library(gtest GTEST_LIBRARY "RELEASE") + __gtest_import_library(gtest GTEST_LIBRARY "DEBUG") endif() - if(NOT TARGET GTest::Main) + if(NOT TARGET gtest_main) __gtest_determine_library_type(GTEST_MAIN_LIBRARY) - add_library(GTest::Main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) - set_target_properties(GTest::Main PROPERTIES - INTERFACE_LINK_LIBRARIES "GTest::GTest") - __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "") - __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "RELEASE") - __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "DEBUG") + add_library(gtest_main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) + set_target_properties(gtest_main PROPERTIES + INTERFACE_LINK_LIBRARIES "gtest") + __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "") + __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "RELEASE") + __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "DEBUG") endif() - if(NOT TARGET GTest::GMock) + if(NOT TARGET gmock) __gtest_determine_library_type(GMOCK_LIBRARY) - add_library(GTest::GMock ${GMOCK_LIBRARY_TYPE} IMPORTED) + add_library(gmock ${GMOCK_LIBRARY_TYPE} IMPORTED) if(TARGET Threads::Threads) - set_target_properties(GTest::GMock PROPERTIES + set_target_properties(gmock PROPERTIES INTERFACE_LINK_LIBRARIES Threads::Threads) endif() if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(GTest::GMock PROPERTIES + set_target_properties(gmock PROPERTIES INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") endif() if(GTEST_INCLUDE_DIRS) - set_target_properties(GTest::GMock PROPERTIES + set_target_properties(gmock PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") endif() - __gtest_import_library(GTest::GMock GMOCK_LIBRARY "") - __gtest_import_library(GTest::GMock GMOCK_LIBRARY "RELEASE") - __gtest_import_library(GTest::GMock GMOCK_LIBRARY "DEBUG") + __gtest_import_library(gmock GMOCK_LIBRARY "") + __gtest_import_library(gmock GMOCK_LIBRARY "RELEASE") + __gtest_import_library(gmock GMOCK_LIBRARY "DEBUG") endif() endif() diff --git a/core/src/index/cmake/FindGTest.cmake b/core/src/index/cmake/FindGTest.cmake index d746e40b05..3e14b49b0e 100644 --- a/core/src/index/cmake/FindGTest.cmake +++ b/core/src/index/cmake/FindGTest.cmake @@ -28,10 +28,10 @@ Imported targets This module defines the following :prop_tgt:`IMPORTED` targets: -``GTest::GTest`` +``gtest`` The Google Test ``gtest`` library, if found; adds Thread::Thread automatically -``GTest::Main`` +``gtest_main`` The Google Test ``gtest_main`` library, if found ``GMock::GMock`` The Google Mock ``gmock`` library, if 
found @@ -80,7 +80,7 @@ Example usage find_package(GTest REQUIRED) add_executable(foo foo.cc) - target_link_libraries(foo GTest::GTest GTest::Main) + target_link_libraries(foo gtest gtest_main) add_test(AllTestsInFoo foo) @@ -228,51 +228,51 @@ if(GTEST_FOUND) find_package(Threads QUIET) - if(NOT TARGET GTest::GTest) + if(NOT TARGET gtest) __gtest_determine_library_type(GTEST_LIBRARY) - add_library(GTest::GTest ${GTEST_LIBRARY_TYPE} IMPORTED) + add_library(gtest ${GTEST_LIBRARY_TYPE} IMPORTED) if(TARGET Threads::Threads) - set_target_properties(GTest::GTest PROPERTIES + set_target_properties(gtest PROPERTIES INTERFACE_LINK_LIBRARIES Threads::Threads) endif() if(GTEST_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(GTest::GTest PROPERTIES + set_target_properties(gtest PROPERTIES INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") endif() if(GTEST_INCLUDE_DIRS) - set_target_properties(GTest::GTest PROPERTIES + set_target_properties(gtest PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") endif() - __gtest_import_library(GTest::GTest GTEST_LIBRARY "") - __gtest_import_library(GTest::GTest GTEST_LIBRARY "RELEASE") - __gtest_import_library(GTest::GTest GTEST_LIBRARY "DEBUG") + __gtest_import_library(gtest GTEST_LIBRARY "") + __gtest_import_library(gtest GTEST_LIBRARY "RELEASE") + __gtest_import_library(gtest GTEST_LIBRARY "DEBUG") endif() - if(NOT TARGET GTest::Main) + if(NOT TARGET gtest_main) __gtest_determine_library_type(GTEST_MAIN_LIBRARY) - add_library(GTest::Main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) - set_target_properties(GTest::Main PROPERTIES - INTERFACE_LINK_LIBRARIES "GTest::GTest") - __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "") - __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "RELEASE") - __gtest_import_library(GTest::Main GTEST_MAIN_LIBRARY "DEBUG") + add_library(gtest_main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) + set_target_properties(gtest_main PROPERTIES + INTERFACE_LINK_LIBRARIES "gtest") + __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "") + __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "RELEASE") + __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "DEBUG") endif() - if(NOT TARGET GTest::GMock) + if(NOT TARGET gmock) __gtest_determine_library_type(GMOCK_LIBRARY) - add_library(GTest::GMock ${GMOCK_LIBRARY_TYPE} IMPORTED) + add_library(gmock ${GMOCK_LIBRARY_TYPE} IMPORTED) if(TARGET Threads::Threads) - set_target_properties(GTest::GMock PROPERTIES + set_target_properties(gmock PROPERTIES INTERFACE_LINK_LIBRARIES Threads::Threads) endif() if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(GTest::GMock PROPERTIES + set_target_properties(gmock PROPERTIES INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") endif() if(GTEST_INCLUDE_DIRS) - set_target_properties(GTest::GMock PROPERTIES + set_target_properties(gmock PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") endif() - __gtest_import_library(GTest::GMock GMOCK_LIBRARY "") - __gtest_import_library(GTest::GMock GMOCK_LIBRARY "RELEASE") - __gtest_import_library(GTest::GMock GMOCK_LIBRARY "DEBUG") + __gtest_import_library(gmock GMOCK_LIBRARY "") + __gtest_import_library(gmock GMOCK_LIBRARY "RELEASE") + __gtest_import_library(gmock GMOCK_LIBRARY "DEBUG") endif() endif() From 9c954b562ab8c8f3204235ee03e589f8874b0ac4 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Tue, 12 Nov 2019 17:30:19 +0800 Subject: [PATCH 172/196] delete FindGTest.cmake --- ci/scripts/build.sh | 1 - ci/travis/install_dependency.sh | 1 - 
core/cmake/FindGTest.cmake | 278 --------------------------- core/src/index/cmake/FindGTest.cmake | 278 --------------------------- 4 files changed, 558 deletions(-) delete mode 100644 core/cmake/FindGTest.cmake delete mode 100644 core/src/index/cmake/FindGTest.cmake diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 0875ac1dd1..a8bd339ff5 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -96,7 +96,6 @@ CMAKE_CMD="cmake \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ -DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ -DArrow_SOURCE=AUTO \ --DGTest_SOURCE=AUTO \ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} ${CMAKE_CMD} diff --git a/ci/travis/install_dependency.sh b/ci/travis/install_dependency.sh index 95d7a698db..e9efd1f441 100755 --- a/ci/travis/install_dependency.sh +++ b/ci/travis/install_dependency.sh @@ -25,7 +25,6 @@ sudo apt-get install -y -q --no-install-recommends \ automake \ ccache \ pkg-config \ - libgtest-dev \ libarrow-dev \ libjemalloc-dev \ libboost-serialization-dev \ diff --git a/core/cmake/FindGTest.cmake b/core/cmake/FindGTest.cmake deleted file mode 100644 index 3e14b49b0e..0000000000 --- a/core/cmake/FindGTest.cmake +++ /dev/null @@ -1,278 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Originally imported from the CMake project at commit -# df4ed1e9ffcdb6b99ccff9e6f44808fdd2abda56 with the following license header: -# -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing for details. - - -#[=======================================================================[.rst: -FindGTest ---------- - -Locate the Google C++ Testing Framework. - -Imported targets -^^^^^^^^^^^^^^^^ - -This module defines the following :prop_tgt:`IMPORTED` targets: - -``gtest`` - The Google Test ``gtest`` library, if found; adds Thread::Thread - automatically -``gtest_main`` - The Google Test ``gtest_main`` library, if found -``GMock::GMock`` - The Google Mock ``gmock`` library, if found - - -Result variables -^^^^^^^^^^^^^^^^ - -This module will set the following variables in your project: - -``GTEST_FOUND`` - Found the Google Testing framework -``GTEST_INCLUDE_DIRS`` - the directory containing the Google Test headers - -The library variables below are set as normal variables. These -contain debug/optimized keywords when a debugging library is found. 
- -``GTEST_LIBRARIES`` - The Google Test ``gtest`` library; note it also requires linking - with an appropriate thread library -``GTEST_MAIN_LIBRARIES`` - The Google Test ``gtest_main`` library -``GTEST_BOTH_LIBRARIES`` - Both ``gtest`` and ``gtest_main`` - -Cache variables -^^^^^^^^^^^^^^^ - -The following cache variables may also be set: - -``GTEST_ROOT`` - The root directory of the Google Test installation (may also be - set as an environment variable) -``GTEST_MSVC_SEARCH`` - If compiling with MSVC, this variable can be set to ``MT`` or - ``MD`` (the default) to enable searching a GTest build tree - - -Example usage -^^^^^^^^^^^^^ - -:: - - enable_testing() - find_package(GTest REQUIRED) - - add_executable(foo foo.cc) - target_link_libraries(foo gtest gtest_main) - - add_test(AllTestsInFoo foo) - - -Deeper integration with CTest -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -See :module:`GoogleTest` for information on the :command:`gtest_add_tests` -and :command:`gtest_discover_tests` commands. -#]=======================================================================] - -# include(${CMAKE_CURRENT_LIST_DIR}/GoogleTest.cmake) - -function(__gtest_append_debugs _endvar _library) - if(${_library} AND ${_library}_DEBUG) - set(_output optimized ${${_library}} debug ${${_library}_DEBUG}) - else() - set(_output ${${_library}}) - endif() - set(${_endvar} ${_output} PARENT_SCOPE) -endfunction() - -function(__gtest_find_library _name) - find_library(${_name} - NAMES ${ARGN} - HINTS - ENV GTEST_ROOT - ${GTEST_ROOT} - PATH_SUFFIXES ${_gtest_libpath_suffixes} - ) - mark_as_advanced(${_name}) -endfunction() - -macro(__gtest_determine_windows_library_type _var) - if(EXISTS "${${_var}}") - file(TO_NATIVE_PATH "${${_var}}" _lib_path) - get_filename_component(_name "${${_var}}" NAME_WE) - file(STRINGS "${${_var}}" _match REGEX "${_name}\\.dll" LIMIT_COUNT 1) - if(NOT _match STREQUAL "") - set(${_var}_TYPE SHARED PARENT_SCOPE) - else() - set(${_var}_TYPE UNKNOWN PARENT_SCOPE) - endif() - return() - endif() -endmacro() - -function(__gtest_determine_library_type _var) - if(WIN32) - # For now, at least, only Windows really needs to know the library type - __gtest_determine_windows_library_type(${_var}) - __gtest_determine_windows_library_type(${_var}_RELEASE) - __gtest_determine_windows_library_type(${_var}_DEBUG) - endif() - # If we get here, no determination was made from the above checks - set(${_var}_TYPE UNKNOWN PARENT_SCOPE) -endfunction() - -function(__gtest_import_library _target _var _config) - if(_config) - set(_config_suffix "_${_config}") - else() - set(_config_suffix "") - endif() - - set(_lib "${${_var}${_config_suffix}}") - if(EXISTS "${_lib}") - if(_config) - set_property(TARGET ${_target} APPEND PROPERTY - IMPORTED_CONFIGURATIONS ${_config}) - endif() - set_target_properties(${_target} PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES${_config_suffix} "CXX") - if(WIN32 AND ${_var}_TYPE STREQUAL SHARED) - set_target_properties(${_target} PROPERTIES - IMPORTED_IMPLIB${_config_suffix} "${_lib}") - else() - set_target_properties(${_target} PROPERTIES - IMPORTED_LOCATION${_config_suffix} "${_lib}") - endif() - endif() -endfunction() - -# - -if(NOT DEFINED GTEST_MSVC_SEARCH) - set(GTEST_MSVC_SEARCH MD) -endif() - -set(_gtest_libpath_suffixes lib) -if(MSVC) - if(GTEST_MSVC_SEARCH STREQUAL "MD") - list(APPEND _gtest_libpath_suffixes - msvc/gtest-md/Debug - msvc/gtest-md/Release - msvc/x64/Debug - msvc/x64/Release - ) - elseif(GTEST_MSVC_SEARCH STREQUAL "MT") - list(APPEND _gtest_libpath_suffixes - msvc/gtest/Debug 
- msvc/gtest/Release - msvc/x64/Debug - msvc/x64/Release - ) - endif() -endif() - - -find_path(GTEST_INCLUDE_DIR gtest/gtest.h - HINTS - $ENV{GTEST_ROOT}/include - ${GTEST_ROOT}/include - PATH_SUFFIXES ${LIB_PATH_SUFFIXES} -) -mark_as_advanced(GTEST_INCLUDE_DIR) - -if(MSVC AND GTEST_MSVC_SEARCH STREQUAL "MD") - # The provided /MD project files for Google Test add -md suffixes to the - # library names. - __gtest_find_library(GTEST_LIBRARY gtest-md gtest) - __gtest_find_library(GTEST_LIBRARY_DEBUG gtest-mdd gtestd) - __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main-md gtest_main) - __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_main-mdd gtest_maind) - __gtest_find_library(GMOCK_LIBRARY gmock-md gmock) - __gtest_find_library(GMOCK_LIBRARY_DEBUG gmock-mdd gmockd) -else() - __gtest_find_library(GTEST_LIBRARY gtest) - __gtest_find_library(GTEST_LIBRARY_DEBUG gtestd) - __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main) - __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_maind) - __gtest_find_library(GMOCK_LIBRARY gmock) - __gtest_find_library(GMOCK_LIBRARY_DEBUG gtestd) -endif() - -include(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(GTest DEFAULT_MSG GTEST_LIBRARY GTEST_INCLUDE_DIR GTEST_MAIN_LIBRARY GMOCK_LIBRARY) - -if(GTEST_FOUND) - set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIR}) - __gtest_append_debugs(GTEST_LIBRARIES GTEST_LIBRARY) - __gtest_append_debugs(GTEST_MAIN_LIBRARIES GTEST_MAIN_LIBRARY) - __gtest_append_debugs(GMOCK_LIBRARIES GMOCK_LIBRARY) - set(GTEST_BOTH_LIBRARIES ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES}) - - find_package(Threads QUIET) - - if(NOT TARGET gtest) - __gtest_determine_library_type(GTEST_LIBRARY) - add_library(gtest ${GTEST_LIBRARY_TYPE} IMPORTED) - if(TARGET Threads::Threads) - set_target_properties(gtest PROPERTIES - INTERFACE_LINK_LIBRARIES Threads::Threads) - endif() - if(GTEST_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(gtest PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") - endif() - if(GTEST_INCLUDE_DIRS) - set_target_properties(gtest PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") - endif() - __gtest_import_library(gtest GTEST_LIBRARY "") - __gtest_import_library(gtest GTEST_LIBRARY "RELEASE") - __gtest_import_library(gtest GTEST_LIBRARY "DEBUG") - endif() - if(NOT TARGET gtest_main) - __gtest_determine_library_type(GTEST_MAIN_LIBRARY) - add_library(gtest_main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) - set_target_properties(gtest_main PROPERTIES - INTERFACE_LINK_LIBRARIES "gtest") - __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "") - __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "RELEASE") - __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "DEBUG") - endif() - if(NOT TARGET gmock) - __gtest_determine_library_type(GMOCK_LIBRARY) - add_library(gmock ${GMOCK_LIBRARY_TYPE} IMPORTED) - if(TARGET Threads::Threads) - set_target_properties(gmock PROPERTIES - INTERFACE_LINK_LIBRARIES Threads::Threads) - endif() - if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(gmock PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") - endif() - if(GTEST_INCLUDE_DIRS) - set_target_properties(gmock PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") - endif() - __gtest_import_library(gmock GMOCK_LIBRARY "") - __gtest_import_library(gmock GMOCK_LIBRARY "RELEASE") - __gtest_import_library(gmock GMOCK_LIBRARY "DEBUG") - endif() -endif() diff --git a/core/src/index/cmake/FindGTest.cmake b/core/src/index/cmake/FindGTest.cmake 
deleted file mode 100644 index 3e14b49b0e..0000000000 --- a/core/src/index/cmake/FindGTest.cmake +++ /dev/null @@ -1,278 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Originally imported from the CMake project at commit -# df4ed1e9ffcdb6b99ccff9e6f44808fdd2abda56 with the following license header: -# -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing for details. - - -#[=======================================================================[.rst: -FindGTest ---------- - -Locate the Google C++ Testing Framework. - -Imported targets -^^^^^^^^^^^^^^^^ - -This module defines the following :prop_tgt:`IMPORTED` targets: - -``gtest`` - The Google Test ``gtest`` library, if found; adds Thread::Thread - automatically -``gtest_main`` - The Google Test ``gtest_main`` library, if found -``GMock::GMock`` - The Google Mock ``gmock`` library, if found - - -Result variables -^^^^^^^^^^^^^^^^ - -This module will set the following variables in your project: - -``GTEST_FOUND`` - Found the Google Testing framework -``GTEST_INCLUDE_DIRS`` - the directory containing the Google Test headers - -The library variables below are set as normal variables. These -contain debug/optimized keywords when a debugging library is found. - -``GTEST_LIBRARIES`` - The Google Test ``gtest`` library; note it also requires linking - with an appropriate thread library -``GTEST_MAIN_LIBRARIES`` - The Google Test ``gtest_main`` library -``GTEST_BOTH_LIBRARIES`` - Both ``gtest`` and ``gtest_main`` - -Cache variables -^^^^^^^^^^^^^^^ - -The following cache variables may also be set: - -``GTEST_ROOT`` - The root directory of the Google Test installation (may also be - set as an environment variable) -``GTEST_MSVC_SEARCH`` - If compiling with MSVC, this variable can be set to ``MT`` or - ``MD`` (the default) to enable searching a GTest build tree - - -Example usage -^^^^^^^^^^^^^ - -:: - - enable_testing() - find_package(GTest REQUIRED) - - add_executable(foo foo.cc) - target_link_libraries(foo gtest gtest_main) - - add_test(AllTestsInFoo foo) - - -Deeper integration with CTest -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -See :module:`GoogleTest` for information on the :command:`gtest_add_tests` -and :command:`gtest_discover_tests` commands. 
-#]=======================================================================] - -# include(${CMAKE_CURRENT_LIST_DIR}/GoogleTest.cmake) - -function(__gtest_append_debugs _endvar _library) - if(${_library} AND ${_library}_DEBUG) - set(_output optimized ${${_library}} debug ${${_library}_DEBUG}) - else() - set(_output ${${_library}}) - endif() - set(${_endvar} ${_output} PARENT_SCOPE) -endfunction() - -function(__gtest_find_library _name) - find_library(${_name} - NAMES ${ARGN} - HINTS - ENV GTEST_ROOT - ${GTEST_ROOT} - PATH_SUFFIXES ${_gtest_libpath_suffixes} - ) - mark_as_advanced(${_name}) -endfunction() - -macro(__gtest_determine_windows_library_type _var) - if(EXISTS "${${_var}}") - file(TO_NATIVE_PATH "${${_var}}" _lib_path) - get_filename_component(_name "${${_var}}" NAME_WE) - file(STRINGS "${${_var}}" _match REGEX "${_name}\\.dll" LIMIT_COUNT 1) - if(NOT _match STREQUAL "") - set(${_var}_TYPE SHARED PARENT_SCOPE) - else() - set(${_var}_TYPE UNKNOWN PARENT_SCOPE) - endif() - return() - endif() -endmacro() - -function(__gtest_determine_library_type _var) - if(WIN32) - # For now, at least, only Windows really needs to know the library type - __gtest_determine_windows_library_type(${_var}) - __gtest_determine_windows_library_type(${_var}_RELEASE) - __gtest_determine_windows_library_type(${_var}_DEBUG) - endif() - # If we get here, no determination was made from the above checks - set(${_var}_TYPE UNKNOWN PARENT_SCOPE) -endfunction() - -function(__gtest_import_library _target _var _config) - if(_config) - set(_config_suffix "_${_config}") - else() - set(_config_suffix "") - endif() - - set(_lib "${${_var}${_config_suffix}}") - if(EXISTS "${_lib}") - if(_config) - set_property(TARGET ${_target} APPEND PROPERTY - IMPORTED_CONFIGURATIONS ${_config}) - endif() - set_target_properties(${_target} PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES${_config_suffix} "CXX") - if(WIN32 AND ${_var}_TYPE STREQUAL SHARED) - set_target_properties(${_target} PROPERTIES - IMPORTED_IMPLIB${_config_suffix} "${_lib}") - else() - set_target_properties(${_target} PROPERTIES - IMPORTED_LOCATION${_config_suffix} "${_lib}") - endif() - endif() -endfunction() - -# - -if(NOT DEFINED GTEST_MSVC_SEARCH) - set(GTEST_MSVC_SEARCH MD) -endif() - -set(_gtest_libpath_suffixes lib) -if(MSVC) - if(GTEST_MSVC_SEARCH STREQUAL "MD") - list(APPEND _gtest_libpath_suffixes - msvc/gtest-md/Debug - msvc/gtest-md/Release - msvc/x64/Debug - msvc/x64/Release - ) - elseif(GTEST_MSVC_SEARCH STREQUAL "MT") - list(APPEND _gtest_libpath_suffixes - msvc/gtest/Debug - msvc/gtest/Release - msvc/x64/Debug - msvc/x64/Release - ) - endif() -endif() - - -find_path(GTEST_INCLUDE_DIR gtest/gtest.h - HINTS - $ENV{GTEST_ROOT}/include - ${GTEST_ROOT}/include - PATH_SUFFIXES ${LIB_PATH_SUFFIXES} -) -mark_as_advanced(GTEST_INCLUDE_DIR) - -if(MSVC AND GTEST_MSVC_SEARCH STREQUAL "MD") - # The provided /MD project files for Google Test add -md suffixes to the - # library names. 
- __gtest_find_library(GTEST_LIBRARY gtest-md gtest) - __gtest_find_library(GTEST_LIBRARY_DEBUG gtest-mdd gtestd) - __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main-md gtest_main) - __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_main-mdd gtest_maind) - __gtest_find_library(GMOCK_LIBRARY gmock-md gmock) - __gtest_find_library(GMOCK_LIBRARY_DEBUG gmock-mdd gmockd) -else() - __gtest_find_library(GTEST_LIBRARY gtest) - __gtest_find_library(GTEST_LIBRARY_DEBUG gtestd) - __gtest_find_library(GTEST_MAIN_LIBRARY gtest_main) - __gtest_find_library(GTEST_MAIN_LIBRARY_DEBUG gtest_maind) - __gtest_find_library(GMOCK_LIBRARY gmock) - __gtest_find_library(GMOCK_LIBRARY_DEBUG gtestd) -endif() - -include(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(GTest DEFAULT_MSG GTEST_LIBRARY GTEST_INCLUDE_DIR GTEST_MAIN_LIBRARY GMOCK_LIBRARY) - -if(GTEST_FOUND) - set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIR}) - __gtest_append_debugs(GTEST_LIBRARIES GTEST_LIBRARY) - __gtest_append_debugs(GTEST_MAIN_LIBRARIES GTEST_MAIN_LIBRARY) - __gtest_append_debugs(GMOCK_LIBRARIES GMOCK_LIBRARY) - set(GTEST_BOTH_LIBRARIES ${GTEST_LIBRARIES} ${GTEST_MAIN_LIBRARIES}) - - find_package(Threads QUIET) - - if(NOT TARGET gtest) - __gtest_determine_library_type(GTEST_LIBRARY) - add_library(gtest ${GTEST_LIBRARY_TYPE} IMPORTED) - if(TARGET Threads::Threads) - set_target_properties(gtest PROPERTIES - INTERFACE_LINK_LIBRARIES Threads::Threads) - endif() - if(GTEST_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(gtest PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") - endif() - if(GTEST_INCLUDE_DIRS) - set_target_properties(gtest PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") - endif() - __gtest_import_library(gtest GTEST_LIBRARY "") - __gtest_import_library(gtest GTEST_LIBRARY "RELEASE") - __gtest_import_library(gtest GTEST_LIBRARY "DEBUG") - endif() - if(NOT TARGET gtest_main) - __gtest_determine_library_type(GTEST_MAIN_LIBRARY) - add_library(gtest_main ${GTEST_MAIN_LIBRARY_TYPE} IMPORTED) - set_target_properties(gtest_main PROPERTIES - INTERFACE_LINK_LIBRARIES "gtest") - __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "") - __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "RELEASE") - __gtest_import_library(gtest_main GTEST_MAIN_LIBRARY "DEBUG") - endif() - if(NOT TARGET gmock) - __gtest_determine_library_type(GMOCK_LIBRARY) - add_library(gmock ${GMOCK_LIBRARY_TYPE} IMPORTED) - if(TARGET Threads::Threads) - set_target_properties(gmock PROPERTIES - INTERFACE_LINK_LIBRARIES Threads::Threads) - endif() - if(GMOCK_LIBRARY_TYPE STREQUAL "SHARED") - set_target_properties(gmock PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") - endif() - if(GTEST_INCLUDE_DIRS) - set_target_properties(gmock PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIRS}") - endif() - __gtest_import_library(gmock GMOCK_LIBRARY "") - __gtest_import_library(gmock GMOCK_LIBRARY "RELEASE") - __gtest_import_library(gmock GMOCK_LIBRARY "DEBUG") - endif() -endif() From 4493a6248dc2f648687db94ea728d56499617e1e Mon Sep 17 00:00:00 2001 From: jielinxu <52057195+jielinxu@users.noreply.github.com> Date: Tue, 12 Nov 2019 18:55:32 +0800 Subject: [PATCH 173/196] [skip ci] Create Mishards README --- shards/README.md | 264 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 264 insertions(+) create mode 100644 shards/README.md diff --git a/shards/README.md b/shards/README.md new file mode 100644 index 0000000000..dba6f785aa --- /dev/null +++ 
b/shards/README.md
@@ -0,0 +1,264 @@
+# Mishards - An Experimental Sharding Middleware
+
+Milvus aims to achieve efficient similarity search and analytics for massive-scale vectors. A standalone Milvus instance can easily handle vector search among billion-scale vectors. However, for 10 billion, 100 billion or even larger datasets, a Milvus cluster is needed.
+
+Ideally, this cluster can be accessed and used just like a standalone instance, while satisfying business requirements such as low latency and high concurrency.
+
+This page demonstrates how to use Mishards, an experimental sharding middleware for Milvus, to establish an orchestrated cluster.
+
+## What is Mishards
+
+Mishards is a middleware developed in Python. It provides unlimited extension of memory and computation capacity through request forwarding, read/write splitting, horizontal scaling and dynamic expansion, and works as a proxy for the Milvus system.
+
+Using Mishards in a Milvus cluster deployment is an experimental feature available for user testing and feedback.
+
+## How Mishards works
+
+Mishards splits upstream requests into sub-requests and forwards them to the Milvus servers. When the search computation is completed, all results are collected by Mishards and sent back to the client.
+
+The graph below demonstrates the process:
+
+![mishards](https://raw.githubusercontent.com/milvus-io/docs/master/assets/mishards.png)
+
+## Mishards example code
+
+The examples below demonstrate how to build a Milvus server with Mishards from source code on a standalone machine, and how to use Kubernetes to establish a Milvus cluster with Mishards.
+
+Before executing these examples, make sure you meet the prerequisites of [Milvus installation](https://www.milvus.io/docs/en/userguide/install_milvus/).
+
+### Build from source code
+
+#### Prerequisites
+
+Make sure Python 3.6 or higher is installed.
+
+#### Start Milvus and Mishards from source code
+
+Follow the steps below to start a standalone Milvus instance with Mishards from source code:
+
+1. Clone the Milvus repository.
+
+   ```shell
+   git clone
+   ```
+
+2. Install the Mishards dependencies.
+
+   ```shell
+   $ cd milvus/shards
+   $ pip install -r requirements.txt
+   ```
+
+3. Start the Milvus server.
+
+   ```shell
+   $ sudo nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
+   ```
+
+4. Update the path permissions.
+
+   ```shell
+   $ sudo chown -R $USER:$USER /tmp/milvus
+   ```
+
+5. Configure the Mishards environment variables.
+
+   ```shell
+   $ cp mishards/.env.example mishards/.env
+   ```
+
+6. Start the Mishards server.
+
+   ```shell
+   $ python mishards/main.py
+   ```
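+Once Mishards is running, it can be addressed exactly like a standalone Milvus server. As a quick sanity check, the hypothetical snippet below probes the Mishards endpoint with the same-era `milvus` Python client (the `Milvus().connect(uri=...)` / `connected()` API used elsewhere in this repository); treat it as a sketch rather than part of the official examples:
+
+```python
+# probe.py -- minimal connectivity check against a Mishards endpoint.
+# Assumes the 0.5.x-era `milvus` Python client; the URI matches SERVER_PORT.
+from milvus import Milvus
+
+client = Milvus()
+client.connect(uri='tcp://127.0.0.1:19530')  # Mishards proxy address
+print('connected:', client.connected())      # True if the proxy is reachable
+```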
+### Docker example
+
+The `all_in_one` example uses Docker containers to start 2 Milvus instances, 1 Mishards instance and 1 Jaeger instance.
+
+ 1. Install [Docker Compose](https://docs.docker.com/compose/install/).
+
+ 2. Build the Docker images for these instances.
+
+    ```shell
+    $ make build
+    ```
+
+ 3. Start all instances.
+
+    ```shell
+    $ make deploy
+    ```
+
+ 4. Confirm the instance status.
+
+    ```shell
+    $ make probe_deploy
+    Pass ==> Pass: Connected
+    Fail ==> Error: Fail connecting to server on 127.0.0.1:19530. Timeout
+    ```
+
+To check the service traces, open the [Jaeger page](http://127.0.0.1:16686/) in your browser.
+
+![jaegerui](https://raw.githubusercontent.com/milvus-io/docs/master/assets/jaegerui.png)
+
+![jaegertraces](https://raw.githubusercontent.com/milvus-io/docs/master/assets/jaegertraces.png)
+
+To stop all instances, use the following command:
+
+```shell
+$ make clean_deploy
+```
+
+### Kubernetes example
+
+Deploying a Milvus cluster with Kubernetes requires a basic understanding of the [general concepts](https://kubernetes.io/docs/concepts/) of Kubernetes.
+
+This example mainly demonstrates how to use Kubernetes to establish a Milvus cluster containing 2 Milvus instances (1 read instance and 1 write instance), 1 MySQL instance and 1 Mishards instance.
+
+This example does not include tasks such as setting up a Kubernetes cluster, [installing shared storage](https://kubernetes.io/docs/concepts/storage/volumes/) and using command tools such as [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+
+Below is the architecture of a Milvus cluster built upon Kubernetes:
+
+![k8s_arch](https://raw.githubusercontent.com/milvus-io/docs/master/assets/k8s_arch.png)
+
+#### Prerequisites
+
+- A Kubernetes cluster is already established.
+- [nvidia-docker 2.0](https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0)) is already installed.
+- Shared storage is already installed.
+- kubectl is installed and can access the Kubernetes cluster.
+
+#### Use Kubernetes to build a Milvus cluster
+
+1. Start the Milvus cluster.
+
+   ```shell
+   $ make cluster
+   ```
+
+2. Confirm that Mishards is connected to Milvus.
+
+   ```shell
+   $ make probe_cluster
+   Pass ==> Pass: Connected
+   ```
+
+To check the cluster status:
+
+```shell
+$ make cluster_status
+```
+
+To delete the cluster:
+
+```shell
+$ make clean_cluster
+```
+
+To add a read instance:
+
+```shell
+$ cd kubernetes_demo
+$ ./start.sh scale-ro-server 2
+```
+
+To add a proxy instance:
+
+```shell
+$ cd kubernetes_demo
+$ ./start.sh scale-proxy 2
+```
+
+To check the logs of the read instance `milvus-ro-servers-0`:
+
+```shell
+$ kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0
+```
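+With several read instances behind the proxy, Mishards fans each search out across them and reduces the per-shard hits into a single global top-k, as described in "How Mishards works" above. The following is a simplified, self-contained sketch of that reduce step only: an illustration of the idea, not Mishards' actual implementation.
+
+```python
+import heapq
+import itertools
+
+def merge_topk(shard_results, topk):
+    """Merge per-shard hit lists into one global top-k.
+
+    shard_results: one list per read instance of (vector_id, distance)
+    tuples, each already sorted by ascending distance (e.g. L2 metric).
+    """
+    merged = heapq.merge(*shard_results, key=lambda hit: hit[1])
+    return list(itertools.islice(merged, topk))
+
+# Two shards each return their local top-3; the global top-3 interleaves them.
+print(merge_topk([[(1, 0.1), (2, 0.4), (3, 0.9)],
+                  [(7, 0.2), (8, 0.3), (9, 0.8)]], topk=3))
+# [(1, 0.1), (7, 0.2), (8, 0.3)]
+```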
+## Mishards unit tests
+
+**Unit tests**
+
+```shell
+$ cd milvus/shards
+$ make test
+```
+
+**Code coverage test**
+
+```shell
+$ cd milvus/shards
+$ make coverage
+```
+
+**Code format check**
+
+```shell
+$ cd milvus/shards
+$ make style
+```
+
+## Mishards configuration
+
+### Overall configuration
+
+| Name          | Required | Type    | Default | Description |
+| ------------- | -------- | ------- | ------- | ------------------------------------------------------------ |
+| `Debug`       | No       | boolean | `True`  | Whether to enable `Debug` mode. |
+| `TIMEZONE`    | No       | string  | `UTC`   | Timezone. |
+| `MAX_RETRY`   | No       | integer | `3`     | The maximum number of retries when connecting to Milvus. |
+| `SERVER_PORT` | No       | integer | `19530` | Define the server port of Mishards. |
+| `WOSERVER`    | **Yes**  | string  | ` `     | Define the address of the Milvus write instance. Currently, only a static setting is supported. Reference format: `tcp://127.0.0.1:19530`. |
+
+### Metadata
+
+| Name | Required | Type | Default | Description |
+| ------------------------------ | -------- | ------- | ------- | ------------------------------------------------------------ |
+| `SQLALCHEMY_DATABASE_URI`      | **Yes**  | string  | ` `     | Define the database address for metadata storage. Format standard: RFC-1738 style. For example: `mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4`. |
+| `SQL_ECHO`                     | No       | boolean | `False` | Whether to print SQL statements. |
+| `SQLALCHEMY_DATABASE_TEST_URI` | No       | string  | ` `     | Define the database address of metadata storage in the test environment. |
+| `SQL_TEST_ECHO`                | No       | boolean | `False` | Whether to print SQL statements in the test environment. |
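+As a quick way to validate the metadata URI before starting Mishards, the hypothetical sketch below opens it with SQLAlchemy directly (the `mysql+pymysql://` example above implies the `sqlalchemy` and `pymysql` packages); this is an illustration, not part of Mishards itself:
+
+```python
+# check_meta.py -- verify that the metadata database is reachable.
+from sqlalchemy import create_engine
+
+# Substitute your own credentials/host; mirrors SQLALCHEMY_DATABASE_URI.
+uri = 'mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4'
+engine = create_engine(uri, echo=False)        # echo=True mirrors SQL_ECHO
+with engine.connect() as conn:
+    print(conn.execute('SELECT 1').scalar())   # prints 1 when reachable
+```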
+### Service discovery
+
+| Name | Required | Type | Default | Description |
+| ------------------------------------- | -------- | ------- | ------------- | ------------------------------------------------------------ |
+| `DISCOVERY_PLUGIN_PATH`               | No       | string  | ` `           | Define the search path to locate the plug-in. The default path is used if the value is not set. |
+| `DISCOVERY_CLASS_NAME`                | No       | string  | `static`      | Under the plug-in search path, search for the class by name and instantiate it. Currently, the system provides 2 classes: `static` and `kubernetes`. |
+| `DISCOVERY_STATIC_HOSTS`              | No       | list    | `[]`          | When `DISCOVERY_CLASS_NAME` is `static`, define a comma-separated service address list, for example `192.168.1.188,192.168.1.190`. |
+| `DISCOVERY_STATIC_PORT`               | No       | integer | `19530`       | When `DISCOVERY_CLASS_NAME` is `static`, define the server port. |
+| `DISCOVERY_KUBERNETES_NAMESPACE`      | No       | string  | ` `           | When `DISCOVERY_CLASS_NAME` is `kubernetes`, define the namespace of the Milvus cluster. |
+| `DISCOVERY_KUBERNETES_IN_CLUSTER`     | No       | boolean | `False`       | When `DISCOVERY_CLASS_NAME` is `kubernetes`, whether the server itself runs inside Kubernetes. |
+| `DISCOVERY_KUBERNETES_POLL_INTERVAL`  | No       | integer | `5` (seconds) | When `DISCOVERY_CLASS_NAME` is `kubernetes`, define the polling interval of the server. |
+| `DISCOVERY_KUBERNETES_POD_PATT`       | No       | string  | ` `           | When `DISCOVERY_CLASS_NAME` is `kubernetes`, the regular expression used to match Milvus pods. |
+| `DISCOVERY_KUBERNETES_LABEL_SELECTOR` | No       | string  | ` `           | When `DISCOVERY_CLASS_NAME` is `kubernetes`, the label selector for Milvus pods. For example: `tier=ro-servers`. |
+
+### Tracing
+
+| Name | Required | Type | Default | Description |
+| ----------------------- | -------- | ------- | ---------- | ------------------------------------------------------------ |
+| `TRACER_PLUGIN_PATH`    | No       | string  | ` `        | Define the search path to locate the tracing plug-in. The default path is used if the value is not set. |
+| `TRACER_CLASS_NAME`     | No       | string  | ` `        | Under the plug-in search path, search for the class by name and instantiate it. Currently, only `Jaeger` is supported. |
+| `TRACING_SERVICE_NAME`  | No       | string  | `mishards` | When `TRACER_CLASS_NAME` is [`Jaeger`](https://www.jaegertracing.io/docs/1.14/), the name of the tracing service. |
+| `TRACING_SAMPLER_TYPE`  | No       | string  | `const`    | When `TRACER_CLASS_NAME` is [`Jaeger`](https://www.jaegertracing.io/docs/1.14/), the [sampling type](https://www.jaegertracing.io/docs/1.14/sampling/) of the tracing service. |
+| `TRACING_SAMPLER_PARAM` | No       | integer | `1`        | When `TRACER_CLASS_NAME` is [`Jaeger`](https://www.jaegertracing.io/docs/1.14/), the [sampling frequency](https://www.jaegertracing.io/docs/1.14/sampling/) of the tracing service. |
+| `TRACING_LOG_PAYLOAD`   | No       | boolean | `False`    | When `TRACER_CLASS_NAME` is [`Jaeger`](https://www.jaegertracing.io/docs/1.14/), whether to include payloads in the sampled traces. |
+
+### Logging
+
+| Name | Required | Type | Default | Description |
+| ----------- | -------- | ------ | --------------- | ------------------------------------------------------------ |
+| `LOG_LEVEL` | No       | string | `DEBUG`         | Log level. Currently supports `DEBUG`, `INFO`, `WARNING` and `ERROR`. |
+| `LOG_PATH`  | No       | string | `/tmp/mishards` | Log file path. |
+| `LOG_NAME`  | No       | string | `logfile`       | Log file name. |
+
+### Routing
+
+| Name | Required | Type | Default | Description |
+| ------------------------ | -------- | ------ | ------------------------- | ------------------------------------------------------------ |
+| `ROUTER_PLUGIN_PATH`     | No       | string | ` `                       | Define the search path to locate the routing plug-in. The default path is used if the value is not set. |
+| `ROUTER_CLASS_NAME`      | No       | string | `FileBasedHashRingRouter` | Under the plug-in search path, search for the class by name and instantiate it. Currently, only `FileBasedHashRingRouter` is supported. |
+| `ROUTER_CLASS_TEST_NAME` | No       | string | `FileBasedHashRingRouter` | Under the plug-in search path, search for the class by name and instantiate it. Currently, `FileBasedHashRingRouter` is supported for the test environment only. |
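+The name `FileBasedHashRingRouter` suggests routing requests over a consistent hash ring of the available read instances. The toy sketch below illustrates only that generic hash-ring idea, with hypothetical node addresses and key; it is not the plug-in's actual code:
+
+```python
+import bisect
+import hashlib
+
+class HashRing:
+    """Toy consistent-hash ring mapping keys onto read-instance addresses."""
+
+    def __init__(self, nodes, replicas=32):
+        self._keys = []   # sorted virtual-node hashes
+        self._ring = {}   # virtual-node hash -> real node
+        for node in nodes:
+            for i in range(replicas):
+                h = self._hash('{}:{}'.format(node, i))
+                self._ring[h] = node
+                bisect.insort(self._keys, h)
+
+    @staticmethod
+    def _hash(value):
+        return int(hashlib.md5(value.encode()).hexdigest(), 16)
+
+    def route(self, key):
+        # First virtual node clockwise from the key's hash, wrapping around.
+        idx = bisect.bisect(self._keys, self._hash(key)) % len(self._keys)
+        return self._ring[self._keys[idx]]
+
+ring = HashRing(['192.168.1.188:19530', '192.168.1.190:19530'])
+print(ring.route('table_demo/segment_0001'))  # stable choice of read instance
+```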
+
+若要查看服务链路追踪详情,使用浏览器打开 [Jaeger 页面](http://127.0.0.1:16686/)。
+
+![jaegerui](https://github.com/milvus-io/docs/blob/master/assets/jaegerui.png)
+
+![jaegertraces](https://github.com/milvus-io/docs/blob/master/assets/jaegertraces.png)
+
+若要清理所有服务,请使用如下命令:
+
+```shell
+$ make clean_deploy
+```
+
+### Kubernetes 示例
+
+使用 Kubernetes 部署 Milvus 分布式集群,要求开发人员对 Kubernetes 的[基本概念](https://kubernetes.io/docs/concepts/)和操作有基本了解。
+
+本示例主要展示如何使用 Kubernetes 搭建 Milvus 集群,包含 2 个 Milvus 实例(1 个可读实例,1 个可写实例)、1 个 MySQL 实例和 1 个 Mishards 实例。
+
+本示例不包括如何搭建 Kubernetes 集群,如何安装[共享存储](https://kubernetes.io/docs/concepts/storage/volumes/)和如何安装 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 命令行工具等。
+
+以下是 Kubernetes 示例架构图:
+
+![k8s_arch](https://github.com/milvus-io/docs/blob/master/assets/k8s_arch.png)
+
+#### 前提条件
+
+使用 Kubernetes 启动多个 Milvus 实例之前,请确保您已满足以下条件:
+
+- 已创建 Kubernetes 集群
+- 已安装 [nvidia-docker 2.0](https://github.com/nvidia/nvidia-docker/wiki/Installation-(version-2.0))
+- 已安装共享存储
+- 已安装 kubectl,且能访问集群
+
+#### Kubernetes 启动集群
+
+1. 启动 Milvus 集群。
+
+   ```shell
+   $ make cluster
+   ```
+
+2. 确认 Mishards 是否可用。
+
+   ```shell
+   $ make probe_cluster
+   Pass ==> Pass: Connected
+   ```
+
+查看集群状态:
+
+```shell
+$ make cluster_status
+```
+
+删除 Milvus 集群:
+
+```shell
+$ make clean_cluster
+```
+
+扩容 Milvus 可读实例到 2 个:
+
+```shell
+$ cd kubernetes_demo
+$ ./start.sh scale-ro-server 2
+```
+
+扩容 Mishards(代理)实例到 2 个:
+
+```shell
+$ cd kubernetes_demo
+$ ./start.sh scale-proxy 2
+```
+
+查看计算节点 `milvus-ro-servers-0` 日志:
+
+```shell
+$ kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0
+```
+
+## 单元测试
+
+**运行单元测试**
+
+```shell
+$ cd milvus/shards
+$ make test
+```
+
+**代码覆盖率测试**
+
+```shell
+$ cd milvus/shards
+$ make coverage
+```
+
+**代码格式检查**
+
+```shell
+$ cd milvus/shards
+$ make style
+```
+
+## Mishards 配置
+
+### 全局配置
+
+| 参数 | 是否必填 | 类型 | 默认值 | 说明 |
+| ------------- | -------- | ------- | ------- | ------------------------------------------------------------ |
+| `Debug` | No | boolean | `True` | 选择是否启用 `Debug` 工作模式。 |
+| `TIMEZONE` | No | string | `UTC` | 时区。 |
+| `MAX_RETRY` | No | integer | `3` | Mishards 连接 Milvus 的最大重试次数。 |
+| `SERVER_PORT` | No | integer | `19530` | 定义 Mishards 的服务端口。 |
+| `WOSERVER` | **Yes** | string | ` ` | 定义 Milvus 可写实例的地址,目前只支持静态设置。参考格式:`tcp://127.0.0.1:19530`。 |
+
+### 元数据
+
+| 参数 | 是否必填 | 类型 | 默认值 | 说明 |
+| ------------------------------ | -------- | ------- | ------- | ------------------------------------------------------------ |
+| `SQLALCHEMY_DATABASE_URI` | **Yes** | string | ` ` | 定义元数据存储的数据库地址,格式遵循 RFC-1738。例如:`mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4`。 |
+| `SQL_ECHO` | No | boolean | `False` | 选择是否打印 SQL 详细语句。 |
+| `SQLALCHEMY_DATABASE_TEST_URI` | No | string | ` ` | 定义测试环境下元数据存储的数据库地址。 |
+| `SQL_TEST_ECHO` | No | boolean | `False` | 选择测试环境下是否打印 SQL 详细语句。 |
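+
+下面是一个仅包含全局配置和元数据配置的最小化 `mishards/.env` 示例(仅作示意,所有取值均为占位符;`KEY=VALUE` 的 dotenv 格式系根据 `mishards/.env.example` 推测):
+
+```shell
+WOSERVER=tcp://127.0.0.1:19530
+SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4
+SQL_ECHO=False
+```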
+
+### 服务发现
+
+| 参数 | 是否必填 | 类型 | 默认值 | 说明 |
+| ------------------------------------- | -------- | ------- | -------- | ------------------------------------------------------------ |
+| `DISCOVERY_PLUGIN_PATH` | No | string | ` ` | 用户自定义服务发现插件的搜索路径,默认使用系统搜索路径。 |
+| `DISCOVERY_CLASS_NAME` | No | string | `static` | 在插件搜索路径下,根据类名搜索类,并将其实例化。目前系统提供 `static` 和 `kubernetes` 两种类,默认使用 `static`。 |
+| `DISCOVERY_STATIC_HOSTS` | No | list | `[]` | `DISCOVERY_CLASS_NAME` 为 `static` 时,定义服务地址列表,地址之间以逗号隔开,例如 `192.168.1.188,192.168.1.190`。 |
+| `DISCOVERY_STATIC_PORT` | No | integer | `19530` | `DISCOVERY_CLASS_NAME` 为 `static` 时,定义服务地址监听端口。 |
+| `DISCOVERY_KUBERNETES_NAMESPACE` | No | string | ` ` | `DISCOVERY_CLASS_NAME` 为 `kubernetes` 时,定义 Milvus 集群的 namespace。 |
+| `DISCOVERY_KUBERNETES_IN_CLUSTER` | No | boolean | `False` | `DISCOVERY_CLASS_NAME` 为 `kubernetes` 时,选择服务发现是否在集群中运行。 |
+| `DISCOVERY_KUBERNETES_POLL_INTERVAL` | No | integer | `5` | `DISCOVERY_CLASS_NAME` 为 `kubernetes` 时,定义服务发现监听周期,单位:秒。 |
+| `DISCOVERY_KUBERNETES_POD_PATT` | No | string | ` ` | `DISCOVERY_CLASS_NAME` 为 `kubernetes` 时,匹配 Milvus Pod 名字的正则表达式。 |
+| `DISCOVERY_KUBERNETES_LABEL_SELECTOR` | No | string | ` ` | `DISCOVERY_CLASS_NAME` 为 `kubernetes` 时,匹配 Milvus Pod 的标签。例如:`tier=ro-servers`。 |
+
+### 链路追踪
+
+| 参数 | 是否必填 | 类型 | 默认值 | 说明 |
+| ----------------------- | -------- | ------- | ---------- | ------------------------------------------------------------ |
+| `TRACER_PLUGIN_PATH` | No | string | ` ` | 用户自定义链路追踪插件的搜索路径,默认使用系统搜索路径。 |
+| `TRACER_CLASS_NAME` | No | string | ` ` | 在插件搜索路径下,根据类名搜索类,并将其实例化。目前只支持 `Jaeger`,默认不使用。 |
+| `TRACING_SERVICE_NAME` | No | string | `mishards` | `TRACER_CLASS_NAME` 为 [`Jaeger`](https://www.jaegertracing.io/docs/1.14/) 时,链路追踪的服务名。 |
+| `TRACING_SAMPLER_TYPE` | No | string | `const` | `TRACER_CLASS_NAME` 为 `Jaeger` 时,链路追踪的[采样类型](https://www.jaegertracing.io/docs/1.14/sampling/)。 |
+| `TRACING_SAMPLER_PARAM` | No | integer | `1` | `TRACER_CLASS_NAME` 为 `Jaeger` 时,链路追踪的[采样频率](https://www.jaegertracing.io/docs/1.14/sampling/)。 |
+| `TRACING_LOG_PAYLOAD` | No | boolean | `False` | `TRACER_CLASS_NAME` 为 `Jaeger` 时,链路追踪是否采集 Payload。 |
+
+### 日志
+
+| 参数 | 是否必填 | 类型 | 默认值 | 说明 |
+| ----------- | -------- | ------ | --------------- | ------------------------------------------------------------ |
+| `LOG_LEVEL` | No | string | `DEBUG` | 日志记录级别,目前支持 `DEBUG`、`INFO`、`WARNING` 和 `ERROR`。 |
+| `LOG_PATH` | No | string | `/tmp/mishards` | 日志记录路径。 |
+| `LOG_NAME` | No | string | `logfile` | 日志记录名。 |
+
+### 路由
+
+| 参数 | 是否必填 | 类型 | 默认值 | 说明 |
+| ------------------------ | -------- | ------ | ------------------------- | ------------------------------------------------------------ |
+| `ROUTER_PLUGIN_PATH` | No | string | ` ` | 用户自定义路由插件的搜索路径,默认使用系统搜索路径。 |
+| `ROUTER_CLASS_NAME` | No | string | `FileBasedHashRingRouter` | 在插件搜索路径下,根据类名搜索路由的类,并将其实例化。目前系统只提供了 `FileBasedHashRingRouter`。 |
+| `ROUTER_CLASS_TEST_NAME` | No | string | `FileBasedHashRingRouter` | 在插件搜索路径下,根据类名搜索路由的类,并将其实例化。目前系统只提供了 `FileBasedHashRingRouter`,仅限测试环境下使用。 |
diff --git a/shards/Tutorial_CN.md b/shards/Tutorial_CN.md
deleted file mode 100644
index 192a0fd285..0000000000
--- a/shards/Tutorial_CN.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Mishards使用文档
----
-Milvus 旨在帮助用户实现海量非结构化数据的近似检索和分析。单个 Milvus 实例可处理十亿级数据规模,而对于百亿或者千亿规模数据的需求,则需要一个 Milvus 集群实例,该实例对于上层应用可以像单机实例一样使用,同时满足海量数据低延迟,高并发业务需求。mishards就是一个集群中间件,其内部处理请求转发,读写分离,水平扩展,动态扩容,为用户提供内存和算力可以无限扩容的 Milvus 实例。
-
-## 运行环境
----
-
-### 单机快速启动实例
-**`python >= 3.4`环境**
-
-```
-1. cd milvus/shards
-2. pip install -r requirements.txt
-3. nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
-4. sudo chown -R $USER:$USER /tmp/milvus
-5. cp mishards/.env.example mishards/.env
-6. 在python mishards/main.py #.env配置mishards监听19532端口
-7. make probe port=19532 #健康检查
-```
-
-### 容器启动实例
-`all_in_one`会在服务器上开启两个milvus实例,一个mishards实例,一个jaeger链路追踪实例
-
-**启动**
-```
-cd milvus/shards
-1. 安装docker-compose
-2. make build
-3. make deploy #监听19531端口
-4. make clean_deploy #清理服务
-5. 
make probe_deplopy #健康检查 -``` - -**打开Jaeger UI** -``` -浏览器打开 "http://127.0.0.1:16686/" -``` - -### kubernetes中快速启动 -**准备** -``` -- kubernetes集群 -- 安装nvidia-docker -- 共享存储 -- 安装kubectl并能访问集群 -``` - -**步骤** -``` -cd milvus/shards -1. make deploy_cluster #启动集群 -2. make probe_cluster #健康检查 -3. make clean_cluster #关闭集群 -``` - -**扩容计算实例** -``` -cd milvus/shards/kubernetes_demo/ -./start.sh scale-ro-server 2 扩容计算实例到2 -``` - -**扩容代理器实例** -``` -cd milvus/shards/kubernetes_demo/ -./start.sh scale-proxy 2 扩容代理服务器实例到2 -``` - -**查看日志** -``` -kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0 查看计算节点milvus-ro-servers-0日志 -``` - -## 测试 - -**启动单元测试** -``` -1. cd milvus/shards -2. make test -``` - -**单元测试覆盖率** -``` -1. cd milvus/shards -2. make coverage -``` - -**代码风格检查** -``` -1. cd milvus/shards -2. make style -``` - -## mishards配置详解 - -### 全局 -| Name | Required | Type | Default Value | Explanation | -| --------------------------- | -------- | -------- | ------------- | ------------- | -| Debug | No | bool | True | 是否Debug工作模式 | -| TIMEZONE | No | string | "UTC" | 时区 | -| MAX_RETRY | No | int | 3 | 最大连接重试次数 | -| SERVER_PORT | No | int | 19530 | 配置服务端口 | -| WOSERVER | **Yes** | str | - | 配置后台可写Milvus实例地址。目前只支持静态设置,例"tcp://127.0.0.1:19530" | - -### 元数据 -| Name | Required | Type | Default Value | Explanation | -| --------------------------- | -------- | -------- | ------------- | ------------- | -| SQLALCHEMY_DATABASE_URI | **Yes** | string | - | 配置元数据存储数据库地址 | -| SQL_ECHO | No | bool | False | 是否打印Sql详细语句 | -| SQLALCHEMY_DATABASE_TEST_URI | No | string | - | 配置测试环境下元数据存储数据库地址 | -| SQL_TEST_ECHO | No | bool | False | 配置测试环境下是否打印Sql详细语句 | - -### 服务发现 -| Name | Required | Type | Default Value | Explanation | -| --------------------------- | -------- | -------- | ------------- | ------------- | -| DISCOVERY_PLUGIN_PATH | No | string | - | 用户自定义服务发现插件搜索路径,默认使用系统搜索路径| -| DISCOVERY_CLASS_NAME | No | string | static | 在服务发现插件搜索路径下搜索类并实例化。目前系统提供 **static** 和 **kubernetes** 两种类,默认使用 **static** | -| DISCOVERY_STATIC_HOSTS | No | list | [] | **DISCOVERY_CLASS_NAME** 为 **static** 时,配置服务地址列表,例"192.168.1.188,192.168.1.190"| -| DISCOVERY_STATIC_PORT | No | int | 19530 | **DISCOVERY_CLASS_NAME** 为 **static** 时,配置 Hosts 监听端口 | -| DISCOVERY_KUBERNETES_NAMESPACE | No | string | - | **DISCOVERY_CLASS_NAME** 为 **kubernetes** 时,配置集群 namespace | -| DISCOVERY_KUBERNETES_IN_CLUSTER | No | bool | False | **DISCOVERY_CLASS_NAME** 为 **kubernetes** 时,标明服务发现是否在集群中运行 | -| DISCOVERY_KUBERNETES_POLL_INTERVAL | No | int | 5 | **DISCOVERY_CLASS_NAME** 为 **kubernetes** 时,标明服务发现监听服务列表频率,单位 Second | -| DISCOVERY_KUBERNETES_POD_PATT | No | string | - | **DISCOVERY_CLASS_NAME** 为 **kubernetes** 时,匹配可读 Milvus 实例的正则表达式 | -| DISCOVERY_KUBERNETES_LABEL_SELECTOR | No | string | - | **SD_PROVIDER** 为**Kubernetes**时,匹配可读Milvus实例的标签选择 | - -### 链路追踪 -| Name | Required | Type | Default Value | Explanation | -| --------------------------- | -------- | -------- | ------------- | ------------- | -| TRACER_PLUGIN_PATH | No | string | - | 用户自定义链路追踪插件搜索路径,默认使用系统搜索路径| -| TRACER_CLASS_NAME | No | string | "" | 链路追踪方案选择,目前只实现 **Jaeger**, 默认不使用| -| TRACING_SERVICE_NAME | No | string | "mishards" | **TRACING_TYPE** 为 **Jaeger** 时,链路追踪服务名 | -| TRACING_SAMPLER_TYPE | No | string | "const" | **TRACING_TYPE** 为 **Jaeger** 时,链路追踪采样类型 | -| TRACING_SAMPLER_PARAM | No | int | 1 | **TRACING_TYPE** 为 **Jaeger** 时,链路追踪采样频率 | -| TRACING_LOG_PAYLOAD | No | bool | False | **TRACING_TYPE** 为 **Jaeger** 时,链路追踪是否采集 Payload | - -### 日志 -| Name | Required | Type | Default Value | 
Explanation | -| --------------------------- | -------- | -------- | ------------- | ------------- | -| LOG_LEVEL | No | string | "DEBUG" if Debug is ON else "INFO" | 日志记录级别 | -| LOG_PATH | No | string | "/tmp/mishards" | 日志记录路径 | -| LOG_NAME | No | string | "logfile" | 日志记录名 | - -### 路由 -| Name | Required | Type | Default Value | Explanation | -| --------------------------- | -------- | -------- | ------------- | ------------- | -| ROUTER_PLUGIN_PATH | No | string | - | 用户自定义路由插件搜索路径,默认使用系统搜索路径| -| ROUTER_CLASS_NAME | No | string | FileBasedHashRingRouter | 处理请求路由类名, 可注册自定义类。目前系统只提供了类 **FileBasedHashRingRouter** | -| ROUTER_CLASS_TEST_NAME | No | string | FileBasedHashRingRouter | 测试环境下处理请求路由类名, 可注册自定义类 | From 285788eba8c47087a0965b4e21bc6b24bc577db1 Mon Sep 17 00:00:00 2001 From: jielinxu <52057195+jielinxu@users.noreply.github.com> Date: Tue, 12 Nov 2019 19:06:51 +0800 Subject: [PATCH 175/196] [skip ci] Add link to CN README --- shards/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/shards/README.md b/shards/README.md index dba6f785aa..f59eca0460 100644 --- a/shards/README.md +++ b/shards/README.md @@ -1,5 +1,7 @@ # Mishards - An Experimental Sharding Middleware +[中文版](README_CN.md) + Milvus aims to achieve efficient similarity search and analytics for massive-scale vectors. A standalone Milvus instance can easily handle vector search among billion-scale vectors. However, for 10 billion, 100 billion or even larger datasets, a Milvus cluster is needed. Ideally, this cluster can be accessed and used just as the standalone instance, meanwhile it satisfies the business requirements such as low latency and high concurrency. From dfcce896e2cd7ab6910730e2e384762095ad4cad Mon Sep 17 00:00:00 2001 From: groot Date: Tue, 12 Nov 2019 19:11:07 +0800 Subject: [PATCH 176/196] #260 C++ SDK README --- CHANGELOG.md | 1 + core/src/sdk/README.md | 93 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 core/src/sdk/README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 363c2cc385..02fb05d9cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ Please mark all change in change log and use the ticket from JIRA. ## Improvement - \#284 - Change C++ SDK to shared library +- \#260 - C++ SDK README ## Task diff --git a/core/src/sdk/README.md b/core/src/sdk/README.md new file mode 100644 index 0000000000..5dc5c733e0 --- /dev/null +++ b/core/src/sdk/README.md @@ -0,0 +1,93 @@ +### Build C++ sdk + +The C++ sdk source code is under milvus/core/src/sdk. Build entire milvus project will also build the sdk project. +If you don't want to build entire milvus project, you can do the following steps: +```shell + # generate make files + $ cd [Milvus root path]/core + $ ./build.sh -l + + # build C++ SDK project + $ cd [Milvus root path]/core/cmake_build + $ make -C src/sdk +``` + +### Try C++ example + +Firstly you need to launch a milvus server. +If you build whole milvus project, just run: +```shell + # start milvus server + $ cd [Milvus root path]/core + $ ./start_server.sh +``` +You also can pull milvus release docker image to launch milvus server. 
+```shell + # pull milvus docker image and start milvus server + $ docker pull milvusdb/milvus:latest + $ docker run --runtime=nvidia -p 19530:19530 -d milvusdb/milvus:latest +``` + +To run C++ example, use below command: + +```shell + # run milvus C++ example + $ cd [Milvus root path]/core/cmake_build/src/sdk/examples/simple + $ ./sdk_simple +``` + +### Make your own C++ client project + +Firstly create a project folder. And copy C++ sdk header and library into the folder. +```shell + # create project folder + $ mkdir MyMilvusClient + $ cd MyMilvusClient + + # copy necessary files + $ cp [Milvus root path]/core/cmake_build/src/sdk/libmilvus_sdk.so . + $ cp [Milvus root path]/core/src/sdk/include/MilvusApi.h . + $ cp [Milvus root path]/core/src/sdk/include/Status.h . +``` + +Create a main.cpp under the project folder, and include C++ sdk headers: +```shell +#include "./MilvusApi.h" +#include "./Status.h" + +int main() { + // connect to milvus server + std::shared_ptr conn = milvus::Connection::Create(); + milvus::ConnectParam param = {"127.0.0.1", "19530"}; + conn->Connect(param); + + // put your client code here + + milvus::Connection::Destroy(conn); + return 0; +} +``` + +Create a CMakeList.txt under the project folder, and paste the follow code into the file: +```shell + cmake_minimum_required(VERSION 3.14) + project(test) + set(CMAKE_CXX_STANDARD 14) + + add_executable(milvus_client main.cpp) + target_link_libraries(milvus_client + ${PROJECT_SOURCE_DIR}/libmilvus_sdk.so) +``` + +Build the client project: +```shell + $ mkdir cmake_build + $ cd cmake_build + $ cmake .. + $ make +``` + +Run your client program: +```shell + $ ./milvus_client +``` \ No newline at end of file From 919846ea6d329a995c2d4febb678c9f3261a72e1 Mon Sep 17 00:00:00 2001 From: groot Date: Tue, 12 Nov 2019 19:47:37 +0800 Subject: [PATCH 177/196] #260 C++ SDK README --- core/src/sdk/README.md | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/core/src/sdk/README.md b/core/src/sdk/README.md index 5dc5c733e0..48a047df1f 100644 --- a/core/src/sdk/README.md +++ b/core/src/sdk/README.md @@ -7,7 +7,7 @@ If you don't want to build entire milvus project, you can do the following steps $ cd [Milvus root path]/core $ ./build.sh -l - # build C++ SDK project + # build C++ sdk project $ cd [Milvus root path]/core/cmake_build $ make -C src/sdk ``` @@ -15,20 +15,20 @@ If you don't want to build entire milvus project, you can do the following steps ### Try C++ example Firstly you need to launch a milvus server. -If you build whole milvus project, just run: +If you already build entire milvus project, just run: ```shell # start milvus server $ cd [Milvus root path]/core $ ./start_server.sh ``` -You also can pull milvus release docker image to launch milvus server. +You also can pull milvus release docker image to launch milvus server: ```shell # pull milvus docker image and start milvus server $ docker pull milvusdb/milvus:latest $ docker run --runtime=nvidia -p 19530:19530 -d milvusdb/milvus:latest ``` -To run C++ example, use below command: +Run C++ example: ```shell # run milvus C++ example @@ -38,7 +38,7 @@ To run C++ example, use below command: ### Make your own C++ client project -Firstly create a project folder. And copy C++ sdk header and library into the folder. +Firstly create a project folder. And copy C++ sdk header and library files into the folder. ```shell # create project folder $ mkdir MyMilvusClient @@ -50,7 +50,7 @@ Firstly create a project folder. 
And copy C++ sdk header and library into the fo $ cp [Milvus root path]/core/src/sdk/include/Status.h . ``` -Create a main.cpp under the project folder, and include C++ sdk headers: +Create main.cpp under the project folder, and paste the following code into the file: ```shell #include "./MilvusApi.h" #include "./Status.h" @@ -68,7 +68,7 @@ int main() { } ``` -Create a CMakeList.txt under the project folder, and paste the follow code into the file: +Create CMakeList.txt under the project folder, and paste the following code into the file: ```shell cmake_minimum_required(VERSION 3.14) project(test) @@ -79,7 +79,17 @@ Create a CMakeList.txt under the project folder, and paste the follow code into ${PROJECT_SOURCE_DIR}/libmilvus_sdk.so) ``` -Build the client project: +Now there are 5 files in your project: +```shell +MyMilvusClient + |-CMakeList.txt + |-main.cpp + |-libmilvus_sdk.so + |-MilvusApi.h + |-Status.h + ``` + +Build the project: ```shell $ mkdir cmake_build $ cd cmake_build From c467ed9d07de4943930ec582af4f1e094e33b6b5 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Wed, 13 Nov 2019 11:15:03 +0800 Subject: [PATCH 178/196] add server_gpu_config.template --- core/CMakeLists.txt | 8 ++++- core/conf/server_gpu_config.template | 45 ++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 core/conf/server_gpu_config.template diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 65094b19a3..bacab79612 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -174,7 +174,13 @@ add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean) if ("${MILVUS_DB_PATH}" STREQUAL "") set(MILVUS_DB_PATH "/tmp/milvus") endif () -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) + +if (MILVUS_GPU_VERSION) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_gpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) +else() + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) +endif() + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf) install(DIRECTORY scripts/ diff --git a/core/conf/server_gpu_config.template b/core/conf/server_gpu_config.template new file mode 100644 index 0000000000..db172f4f1d --- /dev/null +++ b/core/conf/server_gpu_config.template @@ -0,0 +1,45 @@ +# Default values are used when you make no changes to the following parameters. 
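+#
+# NOTE: this file is only a template. CMake's configure_file() renders it to
+# conf/server_config.yaml at build time and substitutes placeholders such as
+# @MILVUS_DB_PATH@ below (see the configure_file() calls in core/CMakeLists.txt).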
+ +server_config: + address: 0.0.0.0 # milvus server ip address (IPv4) + port: 19530 # milvus server port, must in range [1025, 65534] + deploy_mode: single # deployment type: single, cluster_readonly, cluster_writable + time_zone: UTC+8 # time zone, must be in format: UTC+X + +db_config: + primary_path: @MILVUS_DB_PATH@ # path used to store data and meta + secondary_path: # path used to store data only, split by semicolon + + backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database + # Keep 'dialect://:@:/', and replace other texts with real values + # Replace 'dialect' with 'mysql' or 'sqlite' + + insert_buffer_size: 4 # GB, maximum insert buffer size allowed, must be a positive integer + # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory + + preload_table: # preload data at startup, '*' means load all tables, empty value means no preload + # you can specify preload tables like this: table1,table2,table3 + +metric_config: + enable_monitor: false # enable monitoring or not, must be a boolean + collector: prometheus # prometheus + prometheus_config: + port: 8080 # port prometheus uses to fetch metrics, must in range [1025, 65534] + +cache_config: + cpu_cache_capacity: 16 # GB, CPU memory used for cache, must be a positive integer + cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] + gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer + gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] + cache_insert_data: false # whether to load inserted data into cache, must be a boolean + +engine_config: + use_blas_threshold: 1100 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times + # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times + gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only + +resource_config: + search_resources: # define the devices used for search computation, must be in format: cpu or gpux + - cpu + - gpu0 + index_build_device: gpu0 # GPU used for building index, must be in format: gpux From 5772c3b40058eaeddc6cb515cab995c88a8d2bd9 Mon Sep 17 00:00:00 2001 From: groot Date: Wed, 13 Nov 2019 11:42:20 +0800 Subject: [PATCH 179/196] #260 C++ SDK README --- core/src/sdk/README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/core/src/sdk/README.md b/core/src/sdk/README.md index 48a047df1f..0f4e20d0e7 100644 --- a/core/src/sdk/README.md +++ b/core/src/sdk/README.md @@ -1,27 +1,27 @@ -### Build C++ sdk +### Build C++ SDK -The C++ sdk source code is under milvus/core/src/sdk. Build entire milvus project will also build the sdk project. -If you don't want to build entire milvus project, you can do the following steps: +The C++ SDK source code is under milvus/core/src/sdk. Build entire milvus project will also build the C++ SDK project. +If you don't want to build the entire milvus project, follow below steps: ```shell # generate make files $ cd [Milvus root path]/core $ ./build.sh -l - # build C++ sdk project + # build C++ SDK project $ cd [Milvus root path]/core/cmake_build $ make -C src/sdk ``` ### Try C++ example -Firstly you need to launch a milvus server. -If you already build entire milvus project, just run: +Firstly, you need to start a Milvus server. 
+If you've already built the entire milvus project, just start Milvus server with the following command: ```shell # start milvus server $ cd [Milvus root path]/core $ ./start_server.sh ``` -You also can pull milvus release docker image to launch milvus server: +You can also use Docker to start Milvus server: ```shell # pull milvus docker image and start milvus server $ docker pull milvusdb/milvus:latest @@ -38,7 +38,7 @@ Run C++ example: ### Make your own C++ client project -Firstly create a project folder. And copy C++ sdk header and library files into the folder. +Create a folder for the project, and copy C++ SDK header and library files into it. ```shell # create project folder $ mkdir MyMilvusClient @@ -50,7 +50,7 @@ Firstly create a project folder. And copy C++ sdk header and library files into $ cp [Milvus root path]/core/src/sdk/include/Status.h . ``` -Create main.cpp under the project folder, and paste the following code into the file: +Create file main.cpp in the project folder, and copy the following code into it: ```shell #include "./MilvusApi.h" #include "./Status.h" @@ -68,7 +68,7 @@ int main() { } ``` -Create CMakeList.txt under the project folder, and paste the following code into the file: +Create file CMakeList.txt in the project folder, and copy the following code into it: ```shell cmake_minimum_required(VERSION 3.14) project(test) From dfbd0ec63ae1b7f6c383ce4d1f7e1aab6db31b43 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Wed, 13 Nov 2019 13:38:59 +0800 Subject: [PATCH 180/196] update Jenkins CI --- ci/jenkins/scripts/build.sh | 142 --------------------------- ci/jenkins/step/build.groovy | 5 +- ci/jenkins/step/coverage.groovy | 2 +- ci/scripts/build.sh | 36 ++++++- ci/{jenkins => }/scripts/coverage.sh | 19 ++-- 5 files changed, 48 insertions(+), 156 deletions(-) delete mode 100755 ci/jenkins/scripts/build.sh rename ci/{jenkins => }/scripts/coverage.sh (86%) diff --git a/ci/jenkins/scripts/build.sh b/ci/jenkins/scripts/build.sh deleted file mode 100755 index 2ccdf4a618..0000000000 --- a/ci/jenkins/scripts/build.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink - DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - SOURCE="$(readlink "$SOURCE")" - [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located -done -SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build" -BUILD_TYPE="Debug" -BUILD_UNITTEST="OFF" -INSTALL_PREFIX="/opt/milvus" -BUILD_COVERAGE="OFF" -DB_PATH="/opt/milvus" -PROFILING="OFF" -USE_JFROG_CACHE="OFF" -RUN_CPPLINT="OFF" -CUSTOMIZATION="OFF" # default use ori faiss -CUDA_COMPILER=/usr/local/cuda/bin/nvcc - -CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}" -wget -q --method HEAD ${CUSTOMIZED_FAISS_URL} -if [ $? 
-eq 0 ]; then - CUSTOMIZATION="ON" -else - CUSTOMIZATION="OFF" -fi - -while getopts "o:d:t:ulcgjhx" arg -do - case $arg in - o) - INSTALL_PREFIX=$OPTARG - ;; - d) - DB_PATH=$OPTARG - ;; - t) - BUILD_TYPE=$OPTARG # BUILD_TYPE - ;; - u) - echo "Build and run unittest cases" ; - BUILD_UNITTEST="ON"; - ;; - l) - RUN_CPPLINT="ON" - ;; - c) - BUILD_COVERAGE="ON" - ;; - g) - PROFILING="ON" - ;; - j) - USE_JFROG_CACHE="ON" - ;; - x) - CUSTOMIZATION="OFF" # force use ori faiss - ;; - h) # help - echo " - -parameter: --o: install prefix(default: /opt/milvus) --d: db data path(default: /opt/milvus) --t: build type(default: Debug) --u: building unit test options(default: OFF) --l: run cpplint, clang-format and clang-tidy(default: OFF) --c: code coverage(default: OFF) --g: profiling(default: OFF) --j: use jfrog cache build directory(default: OFF) --h: help - -usage: -./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h] - " - exit 0 - ;; - ?) - echo "ERROR! unknown argument" - exit 1 - ;; - esac -done - -if [[ ! -d ${CMAKE_BUILD_DIR} ]]; then - mkdir ${CMAKE_BUILD_DIR} -fi - -cd ${CMAKE_BUILD_DIR} - -# remove make cache since build.sh -l use default variables -# force update the variables each time -make rebuild_cache - -CMAKE_CMD="cmake \ --DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ --DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} --DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ --DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \ --DBUILD_COVERAGE=${BUILD_COVERAGE} \ --DMILVUS_DB_PATH=${DB_PATH} \ --DMILVUS_ENABLE_PROFILING=${PROFILING} \ --DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ --DCUSTOMIZATION=${CUSTOMIZATION} \ --DFAISS_URL=${CUSTOMIZED_FAISS_URL} \ -.." -echo ${CMAKE_CMD} -${CMAKE_CMD} - -if [[ ${RUN_CPPLINT} == "ON" ]]; then - # cpplint check - make lint - if [ $? -ne 0 ]; then - echo "ERROR! cpplint check failed" - exit 1 - fi - echo "cpplint check passed!" - - # clang-format check - make check-clang-format - if [ $? -ne 0 ]; then - echo "ERROR! clang-format check failed" - exit 1 - fi - echo "clang-format check passed!" - -# # clang-tidy check -# make check-clang-tidy -# if [ $? -ne 0 ]; then -# echo "ERROR! clang-tidy check failed" -# rm -f CMakeCache.txt -# exit 1 -# fi -# echo "clang-tidy check passed!" 
-else - # compile and build - make -j8 || exit 1 - make install || exit 1 -fi diff --git a/ci/jenkins/step/build.groovy b/ci/jenkins/step/build.groovy index 14d0414f4f..5b3a46818d 100644 --- a/ci/jenkins/step/build.groovy +++ b/ci/jenkins/step/build.groovy @@ -1,8 +1,7 @@ timeout(time: 60, unit: 'MINUTES') { - dir ("ci/jenkins/scripts") { - sh "./build.sh -l" + dir ("ci/scripts") { withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { - sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -j -u -c" + sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -l -j -u -c" } } } diff --git a/ci/jenkins/step/coverage.groovy b/ci/jenkins/step/coverage.groovy index ff2e3e6fa2..75ac7b48b5 100644 --- a/ci/jenkins/step/coverage.groovy +++ b/ci/jenkins/step/coverage.groovy @@ -1,5 +1,5 @@ timeout(time: 30, unit: 'MINUTES') { - dir ("ci/jenkins/scripts") { + dir ("ci/scripts") { sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP" // Set some env variables so codecov detection script works correctly withCredentials([[$class: 'StringBinding', credentialsId: "${env.PIPELINE_NAME}-codecov-token", variable: 'CODECOV_TOKEN']]) { diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index a8bd339ff5..444537fe6b 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -18,7 +18,7 @@ INSTALL_PREFIX="/opt/milvus" BUILD_COVERAGE="OFF" USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" -CPU_VERSION="ON" +GPU_VERSION="OFF" WITH_MKL="OFF" CUDA_COMPILER=/usr/local/cuda/bin/nvcc @@ -35,7 +35,7 @@ do CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR ;; g) - CPU_VERSION="OFF"; + GPU_VERSION="ON"; ;; u) echo "Build and run unittest cases" ; @@ -84,13 +84,13 @@ if [[ ! -d ${CORE_BUILD_DIR} ]]; then mkdir ${CORE_BUILD_DIR} fi -pushd ${CORE_BUILD_DIR} +cd ${CORE_BUILD_DIR} CMAKE_CMD="cmake \ -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ -DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \ --DMILVUS_CPU_VERSION=${CPU_VERSION} \ +-DMILVUS_GPU_VERSION=${GPU_VERSION} \ -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ @@ -100,6 +100,34 @@ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} ${CMAKE_CMD} + +if [[ ${RUN_CPPLINT} == "ON" ]]; then + # cpplint check + make lint + if [ $? -ne 0 ]; then + echo "ERROR! cpplint check failed" + exit 1 + fi + echo "cpplint check passed!" + + # clang-format check + make check-clang-format + if [ $? -ne 0 ]; then + echo "ERROR! clang-format check failed" + exit 1 + fi + echo "clang-format check passed!" + +# # clang-tidy check +# make check-clang-tidy +# if [ $? -ne 0 ]; then +# echo "ERROR! clang-tidy check failed" +# rm -f CMakeCache.txt +# exit 1 +# fi +# echo "clang-tidy check passed!" 
+fi + # compile and build make -j8 || exit 1 make install || exit 1 diff --git a/ci/jenkins/scripts/coverage.sh b/ci/scripts/coverage.sh similarity index 86% rename from ci/jenkins/scripts/coverage.sh rename to ci/scripts/coverage.sh index 5c9d010d46..17bc08ad00 100755 --- a/ci/jenkins/scripts/coverage.sh +++ b/ci/scripts/coverage.sh @@ -9,18 +9,22 @@ done SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" INSTALL_PREFIX="/opt/milvus" -CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build" +MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core" +CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build" MYSQL_USER_NAME=root MYSQL_PASSWORD=123456 MYSQL_HOST='127.0.0.1' MYSQL_PORT='3306' -while getopts "o:u:p:t:h" arg +while getopts "o:b:u:p:t:h" arg do case $arg in o) INSTALL_PREFIX=$OPTARG ;; + b) + CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR + ;; u) MYSQL_USER_NAME=$OPTARG ;; @@ -35,13 +39,14 @@ do parameter: -o: milvus install prefix(default: /opt/milvus) +-b: core code build directory -u: mysql account -p: mysql password -t: mysql host -h: help usage: -./coverage.sh -o \${INSTALL_PREFIX} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h] +./coverage.sh -o \${INSTALL_PREFIX} -b \$${CORE_BUILD_DIR} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h] " exit 0 ;; @@ -63,12 +68,14 @@ FILE_INFO_OUTPUT="output.info" FILE_INFO_OUTPUT_NEW="output_new.info" DIR_LCOV_OUTPUT="lcov_out" -DIR_GCNO="${CMAKE_BUILD_DIR}" +DIR_GCNO="${CORE_BUILD_DIR}" DIR_UNITTEST="${INSTALL_PREFIX}/unittest" +cd ${SCRIPTS_DIR} + # delete old code coverage info files -rm -rf lcov_out -rm -f FILE_INFO_BASE FILE_INFO_MILVUS FILE_INFO_OUTPUT FILE_INFO_OUTPUT_NEW +rm -rf ${DIR_LCOV_OUTPUT} +rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW} MYSQL_DB_NAME=milvus_`date +%s%N` From 613c74afcf40c163734da5bcdbfeed137daab649 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Wed, 13 Nov 2019 13:43:44 +0800 Subject: [PATCH 181/196] rename server_config.template to server_cpu_config.template --- core/conf/{server_config.template => server_cpu_config.template} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename core/conf/{server_config.template => server_cpu_config.template} (100%) diff --git a/core/conf/server_config.template b/core/conf/server_cpu_config.template similarity index 100% rename from core/conf/server_config.template rename to core/conf/server_cpu_config.template From 03f297fff4a81c6efc05d42a690dcf40a42b8246 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Wed, 13 Nov 2019 13:52:16 +0800 Subject: [PATCH 182/196] format server_cpu_config.template and server_gpu_config.template --- ci/jenkins/step/build.groovy | 2 +- core/conf/server_cpu_config.template | 8 ++------ core/conf/server_gpu_config.template | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/ci/jenkins/step/build.groovy b/ci/jenkins/step/build.groovy index 5b3a46818d..ee0ca020a6 100644 --- a/ci/jenkins/step/build.groovy +++ b/ci/jenkins/step/build.groovy @@ -1,7 +1,7 @@ timeout(time: 60, unit: 'MINUTES') { dir ("ci/scripts") { withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { - sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -l -j -u -c" + sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export 
JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -j -u -c" } } } diff --git a/core/conf/server_cpu_config.template b/core/conf/server_cpu_config.template index 8fc31366e3..6c95126390 100644 --- a/core/conf/server_cpu_config.template +++ b/core/conf/server_cpu_config.template @@ -32,16 +32,12 @@ cache_config: cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] cache_insert_data: false # whether to load inserted data into cache, must be a boolean -#Uncomment the following config if you are using GPU version -# gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer -# gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0] - engine_config: use_blas_threshold: 1100 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only resource_config: - search_resources: # define the devices used for search computation, must be in format: cpu or gpux + search_resources: # define the device used for search computation - cpu - index_build_device: cpu # CPU / GPU used for building index, must be in format: cpu / gpux + index_build_device: cpu # CPU used for building index diff --git a/core/conf/server_gpu_config.template b/core/conf/server_gpu_config.template index db172f4f1d..154db5d134 100644 --- a/core/conf/server_gpu_config.template +++ b/core/conf/server_gpu_config.template @@ -42,4 +42,4 @@ resource_config: search_resources: # define the devices used for search computation, must be in format: cpu or gpux - cpu - gpu0 - index_build_device: gpu0 # GPU used for building index, must be in format: gpux + index_build_device: gpu0 # CPU / GPU used for building index, must be in format: cpu or gpux From 37487a8fee0ddf404a48119933bb47cbb0949703 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Wed, 13 Nov 2019 13:55:46 +0800 Subject: [PATCH 183/196] fix make error --- ci/jenkins/step/build.groovy | 2 +- core/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/jenkins/step/build.groovy b/ci/jenkins/step/build.groovy index ee0ca020a6..bae4259a6f 100644 --- a/ci/jenkins/step/build.groovy +++ b/ci/jenkins/step/build.groovy @@ -1,7 +1,7 @@ timeout(time: 60, unit: 'MINUTES') { dir ("ci/scripts") { withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { - sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -j -u -c" + sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -g -j -u -c" } } } diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index bacab79612..d2ba516fec 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -178,7 +178,7 @@ endif () if (MILVUS_GPU_VERSION) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_gpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) else() - 
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_cpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml) endif() configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf) From 46cce2cd8eded0b6731f3673f4e6caeffd495c94 Mon Sep 17 00:00:00 2001 From: ZhifengZhang-CN Date: Wed, 13 Nov 2019 14:08:14 +0800 Subject: [PATCH 184/196] format ci/scripts/coverage.sh --- ci/scripts/coverage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/scripts/coverage.sh b/ci/scripts/coverage.sh index 17bc08ad00..1590e445ba 100755 --- a/ci/scripts/coverage.sh +++ b/ci/scripts/coverage.sh @@ -46,7 +46,7 @@ parameter: -h: help usage: -./coverage.sh -o \${INSTALL_PREFIX} -b \$${CORE_BUILD_DIR} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h] +./coverage.sh -o \${INSTALL_PREFIX} -b \${CORE_BUILD_DIR} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h] " exit 0 ;; From afbe9152e90e76f8a9de6716c14fdf7fe65c9b1c Mon Sep 17 00:00:00 2001 From: "yudong.cai" Date: Wed, 13 Nov 2019 14:34:07 +0800 Subject: [PATCH 185/196] #275 rename c++ sdk IndexType --- CHANGELOG.md | 1 + .../sdk/examples/partition/src/ClientTest.cpp | 2 +- .../src/sdk/examples/simple/src/ClientTest.cpp | 2 +- core/src/sdk/examples/utils/Utils.cpp | 18 ++++++++++-------- core/src/sdk/include/MilvusApi.h | 12 ++++++------ 5 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02fb05d9cc..ce1af3a29b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ Please mark all change in change log and use the ticket from JIRA. 
- \#226 - Experimental shards middleware for Milvus ## Improvement +- \#275 - Rename C++ SDK IndexType - \#284 - Change C++ SDK to shared library - \#260 - C++ SDK README diff --git a/core/src/sdk/examples/partition/src/ClientTest.cpp b/core/src/sdk/examples/partition/src/ClientTest.cpp index 6e4a7d1826..e58de0ce27 100644 --- a/core/src/sdk/examples/partition/src/ClientTest.cpp +++ b/core/src/sdk/examples/partition/src/ClientTest.cpp @@ -41,7 +41,7 @@ constexpr int64_t NQ = 5; constexpr int64_t TOP_K = 10; constexpr int64_t NPROBE = 32; constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different -constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; +constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::IVFSQ8; constexpr int32_t N_LIST = 15000; constexpr int32_t PARTITION_COUNT = 5; constexpr int32_t TARGET_PARTITION = 3; diff --git a/core/src/sdk/examples/simple/src/ClientTest.cpp b/core/src/sdk/examples/simple/src/ClientTest.cpp index 9045168f2a..9b217a1ed0 100644 --- a/core/src/sdk/examples/simple/src/ClientTest.cpp +++ b/core/src/sdk/examples/simple/src/ClientTest.cpp @@ -41,7 +41,7 @@ constexpr int64_t TOP_K = 10; constexpr int64_t NPROBE = 32; constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different constexpr int64_t ADD_VECTOR_LOOP = 5; -constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8; +constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::IVFSQ8; constexpr int32_t N_LIST = 15000; milvus::TableSchema diff --git a/core/src/sdk/examples/utils/Utils.cpp b/core/src/sdk/examples/utils/Utils.cpp index c527cf47e1..3f96a5b6df 100644 --- a/core/src/sdk/examples/utils/Utils.cpp +++ b/core/src/sdk/examples/utils/Utils.cpp @@ -89,14 +89,16 @@ Utils::MetricTypeName(const milvus::MetricType& metric_type) { std::string Utils::IndexTypeName(const milvus::IndexType& index_type) { switch (index_type) { - case milvus::IndexType::cpu_idmap: - return "cpu idmap"; - case milvus::IndexType::gpu_ivfflat: - return "gpu ivflat"; - case milvus::IndexType::gpu_ivfsq8: - return "gpu ivfsq8"; - case milvus::IndexType::mix_nsg: - return "mix nsg"; + case milvus::IndexType::FLAT: + return "FLAT"; + case milvus::IndexType::IVFFLAT: + return "IVFFLAT"; + case milvus::IndexType::IVFSQ8: + return "IVFSQ8"; + case milvus::IndexType::NSG: + return "NSG"; + case milvus::IndexType::IVFSQ8H: + return "IVFSQ8H"; default: return "Unknown index type"; } diff --git a/core/src/sdk/include/MilvusApi.h b/core/src/sdk/include/MilvusApi.h index 8c92375649..ea1dbf6d75 100644 --- a/core/src/sdk/include/MilvusApi.h +++ b/core/src/sdk/include/MilvusApi.h @@ -31,12 +31,12 @@ namespace milvus { * @brief Index Type */ enum class IndexType { - invalid = 0, - cpu_idmap, - gpu_ivfflat, - gpu_ivfsq8, - mix_nsg, - ivfsq8h, + INVALID = 0, + FLAT = 1, + IVFFLAT = 2, + IVFSQ8 = 3, + NSG = 4, + IVFSQ8H = 5, }; enum class MetricType { From bf69d10f86b821a9456bdc062213e174b6b515bb Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 14:53:37 +0800 Subject: [PATCH 186/196] add find faiss --- core/build.sh | 2 +- core/src/CMakeLists.txt | 2 +- core/src/index/build.sh | 6 +- core/src/index/cmake/DefineOptionsCore.cmake | 2 +- core/src/index/cmake/FindFAISS.cmake | 44 +++++++++ .../index/cmake/ThirdPartyPackagesCore.cmake | 96 ++++++++++--------- core/src/index/knowhere/CMakeLists.txt | 2 +- core/src/index/unittest/CMakeLists.txt | 2 +- .../unittest/faiss_benchmark/CMakeLists.txt | 2 +- .../index/unittest/faiss_ori/CMakeLists.txt | 2 +- 
.../index/unittest/test_nsg/CMakeLists.txt | 2 +- 11 files changed, 105 insertions(+), 57 deletions(-) create mode 100644 core/src/index/cmake/FindFAISS.cmake diff --git a/core/build.sh b/core/build.sh index e844528ad3..5abdaf175a 100755 --- a/core/build.sh +++ b/core/build.sh @@ -117,7 +117,7 @@ CMAKE_CMD="cmake \ -DCUSTOMIZATION=${CUSTOMIZATION} \ -DFAISS_URL=${CUSTOMIZED_FAISS_URL} \ -DMILVUS_GPU_VERSION=${GPU_VERSION} \ --DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ +-DFAISS_WITH_MKL=${WITH_MKL} \ ../" echo ${CMAKE_CMD} ${CMAKE_CMD} diff --git a/core/src/CMakeLists.txt b/core/src/CMakeLists.txt index 79b5e0f1da..4d55fbee18 100644 --- a/core/src/CMakeLists.txt +++ b/core/src/CMakeLists.txt @@ -24,7 +24,7 @@ include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus) add_subdirectory(index) -if (BUILD_FAISS_WITH_MKL) +if (FAISS_WITH_MKL) add_compile_definitions("WITH_MKL") endif () diff --git a/core/src/index/build.sh b/core/src/index/build.sh index a77a16cf0b..357ac5693a 100644 --- a/core/src/index/build.sh +++ b/core/src/index/build.sh @@ -5,7 +5,7 @@ BUILD_UNITTEST="OFF" INSTALL_PREFIX=$(pwd)/cmake_build MAKE_CLEAN="OFF" PROFILING="OFF" -BUILD_FAISS_WITH_MKL="OFF" +FAISS_WITH_MKL="OFF" USE_JFROG_CACHE="OFF" while getopts "p:d:t:uhrcgmj" arg @@ -31,7 +31,7 @@ do PROFILING="ON" ;; m) - BUILD_FAISS_WITH_MKL="ON" + FAISS_WITH_MKL="ON" ;; j) USE_JFROG_CACHE="ON" @@ -74,7 +74,7 @@ if [[ ${MAKE_CLEAN} == "ON" ]]; then -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ -DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \ -DMILVUS_ENABLE_PROFILING=${PROFILING} \ - -DBUILD_FAISS_WITH_MKL=${BUILD_FAISS_WITH_MKL} \ + -DFAISS_WITH_MKL=${FAISS_WITH_MKL} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ ../" echo ${CMAKE_CMD} diff --git a/core/src/index/cmake/DefineOptionsCore.cmake b/core/src/index/cmake/DefineOptionsCore.cmake index 2f050cdf4e..787a9c484f 100644 --- a/core/src/index/cmake/DefineOptionsCore.cmake +++ b/core/src/index/cmake/DefineOptionsCore.cmake @@ -79,7 +79,7 @@ define_option(KNOWHERE_WITH_FAISS "Build with FAISS library" ON) define_option(KNOWHERE_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON) -define_option(BUILD_FAISS_WITH_MKL "Build FAISS with MKL" OFF) +define_option(FAISS_WITH_MKL "Build FAISS with MKL" OFF) #---------------------------------------------------------------------- set_option_category("Test and benchmark") diff --git a/core/src/index/cmake/FindFAISS.cmake b/core/src/index/cmake/FindFAISS.cmake new file mode 100644 index 0000000000..e0e6337bce --- /dev/null +++ b/core/src/index/cmake/FindFAISS.cmake @@ -0,0 +1,44 @@ +set(FAISS_STATIC_LIB_NAME ${CMAKE_STATIC_LIBRARY_PREFIX}faiss${CMAKE_STATIC_LIBRARY_SUFFIX}) + +# First, find via if specified FAISS_ROOT +if (FAISS_ROOT) + find_library(FAISS_STATIC_LIB + NAMES ${FAISS_STATIC_LIB_NAME} + PATHS ${FAISS_ROOT} + PATH_SUFFIXES "lib" + NO_DEFAULT_PATH + ) + find_path(FAISS_INCLUDE_DIR + NAMES "faiss/Index.h" + PATHS ${FAISS_ROOT} + NO_DEFAULT_PATH + PATH_SUFFIXES "include" + ) +endif () + +find_package_handle_standard_args(FAISS REQUIRED_VARS FAISS_STATIC_LIB FAISS_INCLUDE_DIR) + +if (FAISS_FOUND) + if (NOT TARGET faiss) + add_library(faiss STATIC IMPORTED) + + set_target_properties( + faiss + PROPERTIES + IMPORTED_LOCATION "${FAISS_STATIC_LIB}" + INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}" + ) + + if (FAISS_WITH_MKL) + set_target_properties( + faiss + PROPERTIES + INTERFACE_LINK_LIBRARIES "${MKL_LIBS}") + else () + set_target_properties( + faiss + PROPERTIES + INTERFACE_LINK_LIBRARIES 
"openblas;lapack") + endif () + endif () +endif () diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 60ac4b9eac..8563e37e87 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -21,6 +21,7 @@ set(KNOWHERE_THIRDPARTY_DEPENDENCIES GTest LAPACK OpenBLAS + MKL ) message(STATUS "Using ${KNOWHERE_DEPENDENCY_SOURCE} approach to find dependencies") @@ -43,6 +44,8 @@ macro(build_dependency DEPENDENCY_NAME) build_openblas() elseif ("${DEPENDENCY_NAME}" STREQUAL "FAISS") build_faiss() + elseif ("${DEPENDENCY_NAME}" STREQUAL "MKL") + build_mkl() else () message(FATAL_ERROR "Unknown thirdparty dependency to build: ${DEPENDENCY_NAME}") endif () @@ -51,9 +54,9 @@ endmacro() macro(resolve_dependency DEPENDENCY_NAME) if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO") find_package(${DEPENDENCY_NAME} MODULE) - if(NOT ${${DEPENDENCY_NAME}_FOUND}) - build_dependency(${DEPENDENCY_NAME}) - endif() + if (NOT ${${DEPENDENCY_NAME}_FOUND}) + build_dependency(${DEPENDENCY_NAME}) + endif () elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED") build_dependency(${DEPENDENCY_NAME}) elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM") @@ -238,11 +241,11 @@ if (CUSTOMIZATION) # set(FAISS_MD5 "f3b2ce3364c3fa7febd3aa7fdd0fe380") # commit-id 694e03458e6b69ce8a62502f71f69a614af5af8f branch-0.3.0 # set(FAISS_MD5 "bb30722c22390ce5f6759ccb216c1b2a") # commit-id d324db297475286afe107847c7fb7a0f9dc7e90e branch-0.3.0 set(FAISS_MD5 "2293cdb209c3718e3b19f3edae8b32b3") # commit-id a13c1205dc52977a9ad3b33a14efa958604a8bff branch-0.3.0 - endif() -else() + endif () +else () set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/1.6.0.tar.gz") set(FAISS_MD5 "b02c1a53234f5acc9bea1b0c55524f50") -endif() +endif () message(STATUS "FAISS URL = ${FAISS_SOURCE_URL}") if (DEFINED ENV{KNOWHERE_ARROW_URL}) @@ -673,28 +676,46 @@ if (KNOWHERE_BUILD_TESTS AND NOT TARGET googletest_ep) include_directories(SYSTEM ${GTEST_INCLUDE_DIR}) endif () +# ---------------------------------------------------------------------- +# MKL + +macro(build_mkl) + + if (FAISS_WITH_MKL) + if (EXISTS "/proc/cpuinfo") + FILE(READ /proc/cpuinfo PROC_CPUINFO) + + SET(VENDOR_ID_RX "vendor_id[ \t]*:[ \t]*([a-zA-Z]+)\n") + STRING(REGEX MATCH "${VENDOR_ID_RX}" VENDOR_ID "${PROC_CPUINFO}") + STRING(REGEX REPLACE "${VENDOR_ID_RX}" "\\1" VENDOR_ID "${VENDOR_ID}") + + if (NOT ${VENDOR_ID} STREQUAL "GenuineIntel") + set(FAISS_WITH_MKL OFF) + endif () + endif () + + find_path(MKL_LIB_PATH + NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a" + PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/") + if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND") + message(FATAL_ERROR "Could not find MKL libraries") + endif () + message(STATUS "MKL lib path = ${MKL_LIB_PATH}") + + set(MKL_LIBS + ${MKL_LIB_PATH}/libmkl_intel_ilp64.a + ${MKL_LIB_PATH}/libmkl_gnu_thread.a + ${MKL_LIB_PATH}/libmkl_core.a + ) + endif () +endmacro() + # ---------------------------------------------------------------------- # FAISS macro(build_faiss) message(STATUS "Building FAISS-${FAISS_VERSION} from source") - if (NOT DEFINED BUILD_FAISS_WITH_MKL) - set(BUILD_FAISS_WITH_MKL OFF) - endif () - - if (EXISTS "/proc/cpuinfo") - FILE(READ /proc/cpuinfo PROC_CPUINFO) - - SET(VENDOR_ID_RX "vendor_id[ \t]*:[ \t]*([a-zA-Z]+)\n") - STRING(REGEX MATCH "${VENDOR_ID_RX}" VENDOR_ID "${PROC_CPUINFO}") - STRING(REGEX REPLACE "${VENDOR_ID_RX}" "\\1" 
VENDOR_ID "${VENDOR_ID}") - - if (NOT ${VENDOR_ID} STREQUAL "GenuineIntel") - set(BUILD_FAISS_WITH_MKL OFF) - endif () - endif () - set(FAISS_PREFIX "${INDEX_BINARY_DIR}/faiss_ep-prefix/src/faiss_ep") set(FAISS_INCLUDE_DIR "${FAISS_PREFIX}/include") set(FAISS_STATIC_LIB @@ -706,30 +727,11 @@ macro(build_faiss) "CXXFLAGS=${EP_CXX_FLAGS}" --without-python) - set(FAISS_CFLAGS ${EP_C_FLAGS}) - set(FAISS_CXXFLAGS ${EP_CXX_FLAGS}) - - if (BUILD_FAISS_WITH_MKL) - - find_path(MKL_LIB_PATH - NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a" - PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/") - if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND") - message(FATAL_ERROR "Could not find MKL libraries") - endif () - message(STATUS "Build Faiss with MKL. MKL lib path = ${MKL_LIB_PATH}") - - set(MKL_LIBS - ${MKL_LIB_PATH}/libmkl_intel_ilp64.a - ${MKL_LIB_PATH}/libmkl_gnu_thread.a - ${MKL_LIB_PATH}/libmkl_core.a - ) - + if (FAISS_WITH_MKL) set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} "CPPFLAGS=-DFINTEGER=long -DMKL_ILP64 -m64 -I${MKL_LIB_PATH}/../../include" "LDFLAGS=-L${MKL_LIB_PATH}" ) - else () message(STATUS "Build Faiss with OpenBlas/LAPACK") set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} @@ -770,7 +772,7 @@ macro(build_faiss) BUILD_BYPRODUCTS ${FAISS_STATIC_LIB}) - if (NOT BUILD_FAISS_WITH_MKL) + if (NOT FAISS_WITH_MKL) ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep) endif () @@ -800,7 +802,7 @@ macro(build_faiss) BUILD_BYPRODUCTS ${FAISS_STATIC_LIB}) - if (NOT BUILD_FAISS_WITH_MKL) + if (NOT FAISS_WITH_MKL) ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep) endif () @@ -815,7 +817,7 @@ macro(build_faiss) IMPORTED_LOCATION "${FAISS_STATIC_LIB}" INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}" ) - if (BUILD_FAISS_WITH_MKL) + if (FAISS_WITH_MKL) set_target_properties( faiss PROPERTIES @@ -834,7 +836,9 @@ endmacro() if (KNOWHERE_WITH_FAISS AND NOT TARGET faiss_ep) - if (NOT BUILD_FAISS_WITH_MKL) + if (FAISS_WITH_MKL) + resolve_dependency(MKL) + else () resolve_dependency(OpenBLAS) get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES) include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}") diff --git a/core/src/index/knowhere/CMakeLists.txt b/core/src/index/knowhere/CMakeLists.txt index 373965acf0..5f8e4d6970 100644 --- a/core/src/index/knowhere/CMakeLists.txt +++ b/core/src/index/knowhere/CMakeLists.txt @@ -54,7 +54,7 @@ set(depend_libs gfortran pthread ) -if (BUILD_FAISS_WITH_MKL) +if (FAISS_WITH_MKL) set(depend_libs ${depend_libs} "-Wl,--start-group \ ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ diff --git a/core/src/index/unittest/CMakeLists.txt b/core/src/index/unittest/CMakeLists.txt index 13277aa40e..71636738aa 100644 --- a/core/src/index/unittest/CMakeLists.txt +++ b/core/src/index/unittest/CMakeLists.txt @@ -8,7 +8,7 @@ set(depend_libs faiss arrow "${ARROW_LIB_DIR}/libjemalloc_pic.a" ) -if (BUILD_FAISS_WITH_MKL) +if (FAISS_WITH_MKL) set(depend_libs ${depend_libs} "-Wl,--start-group \ ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ diff --git a/core/src/index/unittest/faiss_benchmark/CMakeLists.txt b/core/src/index/unittest/faiss_benchmark/CMakeLists.txt index 6cad5abda5..e2fd7a6c85 100644 --- a/core/src/index/unittest/faiss_benchmark/CMakeLists.txt +++ b/core/src/index/unittest/faiss_benchmark/CMakeLists.txt @@ -15,7 +15,7 @@ if (KNOWHERE_GPU_VERSION) faiss hdf5 arrow ${ARROW_LIB_DIR}/libjemalloc_pic.a ) - if (BUILD_FAISS_WITH_MKL) + if (FAISS_WITH_MKL) set(depend_libs 
${depend_libs} "-Wl,--start-group \ ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ diff --git a/core/src/index/unittest/faiss_ori/CMakeLists.txt b/core/src/index/unittest/faiss_ori/CMakeLists.txt index 829a27fd87..907076ca6e 100644 --- a/core/src/index/unittest/faiss_ori/CMakeLists.txt +++ b/core/src/index/unittest/faiss_ori/CMakeLists.txt @@ -10,7 +10,7 @@ if (KNOWHERE_GPU_VERSION) faiss arrow ${ARROW_LIB_DIR}/libjemalloc_pic.a ) - if (BUILD_FAISS_WITH_MKL) + if (FAISS_WITH_MKL) set(depend_libs ${depend_libs} "-Wl,--start-group \ ${MKL_LIB_PATH}/libmkl_intel_ilp64.a \ diff --git a/core/src/index/unittest/test_nsg/CMakeLists.txt b/core/src/index/unittest/test_nsg/CMakeLists.txt index 269c0d2139..82dbb5110c 100644 --- a/core/src/index/unittest/test_nsg/CMakeLists.txt +++ b/core/src/index/unittest/test_nsg/CMakeLists.txt @@ -5,7 +5,7 @@ add_definitions(-std=c++11 -O3 -lboost -march=native -Wall -DINFO) find_package(OpenMP REQUIRED) -if (OPENMP_FOUND) +if (OpenMP_FOUND) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") else () From 1f60505ef764ae3ef60f75b3dba4175b9a7fca04 Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 15:17:44 +0800 Subject: [PATCH 187/196] update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce1af3a29b..be8bc36b3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Please mark all change in change log and use the ticket from JIRA. - \#12 - Pure CPU version for Milvus - \#77 - Support table partition - \#226 - Experimental shards middleware for Milvus +- \#314 - add Find FAISS in CMake ## Improvement - \#275 - Rename C++ SDK IndexType From f20e5705b4144706c19d4e1a24dd7cd64ffc0fdd Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 15:43:24 +0800 Subject: [PATCH 188/196] Add Q&A for 'protocol https not supported or disable in libcurl' issue --- CHANGELOG.md | 3 ++- install.md | 26 ++++++++++++++++++++++---- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be8bc36b3b..d7b4c85ae8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,12 +11,13 @@ Please mark all change in change log and use the ticket from JIRA. - \#12 - Pure CPU version for Milvus - \#77 - Support table partition - \#226 - Experimental shards middleware for Milvus -- \#314 - add Find FAISS in CMake ## Improvement - \#275 - Rename C++ SDK IndexType - \#284 - Change C++ SDK to shared library - \#260 - C++ SDK README +- \#314 - add Find FAISS in CMake +- \#310 - Add Q&A for 'protocol https not supported or disable in libcurl' issue ## Task diff --git a/install.md b/install.md index f869520ba3..f92174ae36 100644 --- a/install.md +++ b/install.md @@ -3,7 +3,10 @@ ## Software requirements - Ubuntu 18.04 or higher -- CMake 3.14 or higher +- CMake 3.12 or higher + +##### For GPU version, you will also need: + - CUDA 10.0 or higher - NVIDIA driver 418 or higher @@ -12,17 +15,20 @@ ### Step 1 Install dependencies ```shell -$ cd [Milvus sourcecode path]/core +$ cd [Milvus root path]/core $ ./ubuntu_build_deps.sh ``` ### Step 2 Build ```shell -$ cd [Milvus sourcecode path]/core +$ cd [Milvus root path]/core $ ./build.sh -t Debug or $ ./build.sh -t Release + +For GPU version, add -g option +$ ./build.sh -g ``` When the build is completed, all the stuff that you need in order to run Milvus will be installed under `[Milvus root path]/core/milvus`. 
@@ -36,7 +42,7 @@ $ cd [Milvus root path]/core/milvus Add `lib/` directory to `LD_LIBRARY_PATH` ``` -$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/milvus/lib +$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:[Milvus root path]/core/milvus/lib ``` Then start Milvus server: @@ -51,3 +57,15 @@ To stop Milvus server, run: ```shell $ ./stop_server.sh ``` + +## Troubleshooting +1. If you encounter the following error when compiling: +`protocol https not supported or disabled in libcurl`, +first make sure you have `libcurl4-openssl-dev` installed on your system. +Then try reinstalling CMake from source with the `--system-curl` option: +``` +$ ./bootstrap --system-curl +$ make +$ sudo make install +``` + From 12f87049a09712b30d73646adb90457d7de830ac Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 15:46:45 +0800 Subject: [PATCH 189/196] update CHANGELOG --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be8bc36b3b..fefd4e54c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,12 +11,12 @@ Please mark all change in change log and use the ticket from JIRA. - \#12 - Pure CPU version for Milvus - \#77 - Support table partition - \#226 - Experimental shards middleware for Milvus -- \#314 - Add Find FAISS in CMake ## Improvement - \#275 - Rename C++ SDK IndexType - \#284 - Change C++ SDK to shared library - \#260 - C++ SDK README +- \#314 - Add Find FAISS in CMake ## Task From 1fa4c2531a2762130b9707bace08805c4601a07f Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 16:01:03 +0800 Subject: [PATCH 190/196] remove FAISS_URL --- .../index/cmake/ThirdPartyPackagesCore.cmake | 22 +++---------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 8563e37e87..854b295b1a 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -225,28 +225,12 @@ foreach (_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT}) set(${_LIB_NAME} "${_LIB_VERSION}") endforeach () -if (CUSTOMIZATION) - execute_process(COMMAND wget -q --method HEAD ${FAISS_URL} RESULT_VARIABLE return_code) - message(STATUS "Check the remote cache file ${FAISS_URL}.
return code = ${return_code}") - if (NOT return_code EQUAL 0) - MESSAGE(FATAL_ERROR "Can't access to ${FAISS_URL}") - else () - set(FAISS_SOURCE_URL ${FAISS_URL}) - # set(FAISS_MD5 "a589663865a8558205533c8ac414278c") - # set(FAISS_MD5 "57da9c4f599cc8fa4260488b1c96e1cc") # commit-id 6dbdf75987c34a2c853bd172ea0d384feea8358c branch-0.2.0 - # set(FAISS_MD5 "21deb1c708490ca40ecb899122c01403") # commit-id 643e48f479637fd947e7b93fa4ca72b38ecc9a39 branch-0.2.0 - # set(FAISS_MD5 "072db398351cca6e88f52d743bbb9fa0") # commit-id 3a2344d04744166af41ef1a74449d68a315bfe17 branch-0.2.1 - # set(FAISS_MD5 "c89ea8e655f5cdf58f42486f13614714") # commit-id 9c28a1cbb88f41fa03b03d7204106201ad33276b branch-0.2.1 - # set(FAISS_MD5 "87fdd86351ffcaf3f80dc26ade63c44b") # commit-id 841a156e67e8e22cd8088e1b58c00afbf2efc30b branch-0.2.1 - # set(FAISS_MD5 "f3b2ce3364c3fa7febd3aa7fdd0fe380") # commit-id 694e03458e6b69ce8a62502f71f69a614af5af8f branch-0.3.0 - # set(FAISS_MD5 "bb30722c22390ce5f6759ccb216c1b2a") # commit-id d324db297475286afe107847c7fb7a0f9dc7e90e branch-0.3.0 - set(FAISS_MD5 "2293cdb209c3718e3b19f3edae8b32b3") # commit-id a13c1205dc52977a9ad3b33a14efa958604a8bff branch-0.3.0 - endif () +if (DEFINED ENV{FAISS_SOURCE_URL}) + set(FAISS_SOURCE_URL "$ENV{FAISS_SOURCE_URL}") else () - set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/1.6.0.tar.gz") + set(FAISS_SOURCE_URL "https://github.com/milvus-io/faiss/archive/${FAISS_VERSION}.tar.gz") set(FAISS_MD5 "b02c1a53234f5acc9bea1b0c55524f50") endif () -message(STATUS "FAISS URL = ${FAISS_SOURCE_URL}") if (DEFINED ENV{KNOWHERE_ARROW_URL}) set(ARROW_SOURCE_URL "$ENV{KNOWHERE_ARROW_URL}") From de1aa04bd0cf5666d325cb0ff08a61689bb6c8f9 Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 16:11:31 +0800 Subject: [PATCH 191/196] update build.sh --- core/build.sh | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/core/build.sh b/core/build.sh index 5abdaf175a..9b690a0261 100755 --- a/core/build.sh +++ b/core/build.sh @@ -14,16 +14,10 @@ CUSTOMIZATION="OFF" # default use ori faiss CUDA_COMPILER=/usr/local/cuda/bin/nvcc GPU_VERSION="OFF" #defaults to CPU version WITH_MKL="OFF" +FAISS_ROOT="" +FAISS_SOURCE="BUNDLED" -CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}" -wget -q --method HEAD ${CUSTOMIZED_FAISS_URL} -if [ $? 
-eq 0 ]; then - CUSTOMIZATION="ON" -else - CUSTOMIZATION="OFF" -fi - -while getopts "p:d:t:ulrcgjhxzm" arg +while getopts "p:d:t:f:ulrcgjhxzm" arg do case $arg in p) @@ -35,6 +29,10 @@ do t) BUILD_TYPE=$OPTARG # BUILD_TYPE ;; + f) + FAISS_ROOT=$OPTARG + FAISS_SOURCE="AUTO" + ;; u) echo "Build and run unittest cases" ; BUILD_UNITTEST="ON"; @@ -73,6 +71,7 @@ parameter: -p: install prefix(default: $(pwd)/milvus) -d: db data path(default: /tmp/milvus) -t: build type(default: Debug) +-f: faiss root path(default: empty) -u: building unit test options(default: OFF) -l: run cpplint, clang-format and clang-tidy(default: OFF) -r: remove previous build directory(default: OFF) @@ -84,7 +83,7 @@ parameter: -h: help usage: -./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-h] +./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} -f \${FAISS_ROOT} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-h] " exit 0 ;; @@ -109,13 +108,14 @@ CMAKE_CMD="cmake \ -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ +-DFAISS_ROOT=${FAISS_ROOT} \ +-DFAISS_SOURCE=${FAISS_SOURCE} \ -DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DMILVUS_DB_PATH=${DB_PATH} \ -DMILVUS_ENABLE_PROFILING=${PROFILING} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ -DCUSTOMIZATION=${CUSTOMIZATION} \ --DFAISS_URL=${CUSTOMIZED_FAISS_URL} \ -DMILVUS_GPU_VERSION=${GPU_VERSION} \ -DFAISS_WITH_MKL=${WITH_MKL} \ ../" From 7aef840e48f3bfc014ce2d48176a4977594afbe6 Mon Sep 17 00:00:00 2001 From: quicksilver Date: Wed, 13 Nov 2019 16:14:59 +0800 Subject: [PATCH 192/196] Fix bug in determining Milvus version from git branch name --- core/CMakeLists.txt | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index d2ba516fec..2c991a47f6 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -32,10 +32,8 @@ string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME}) message(STATUS "Build time = ${BUILD_TIME}") MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME) - execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) - if (GIT_BRANCH_NAME STREQUAL "") - execute_process(COMMAND "git" symbolic-ref --short -q HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) - endif () + execute_process(COMMAND sh "-c" "git log --decorate | head -n 1 | sed 's/.*(\\(.*\\))/\\1/' | sed 's/.* \\(.*\\),.*/\\1/' | sed 's=[a-zA-Z]*\/==g'" + OUTPUT_VARIABLE ${GIT_BRANCH_NAME}) ENDMACRO(GET_GIT_BRANCH_NAME) GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME) From 85e76aad8d94437e6a3e899bcf62b7332ddd747f Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 16:18:18 +0800 Subject: [PATCH 193/196] minor update --- install.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/install.md b/install.md index f92174ae36..4d2088a3be 100644 --- a/install.md +++ b/install.md @@ -26,8 +26,10 @@ $ cd [Milvus root path]/core $ ./build.sh -t Debug or $ ./build.sh -t Release +``` -For GPU version, add -g option +By default, it builds the CPU version.
To build the GPU version, add the `-g` option +``` $ ./build.sh -g ``` From f251d26bbb8c2a4caef1377e78490e64cae06676 Mon Sep 17 00:00:00 2001 From: wxyu Date: Wed, 13 Nov 2019 16:35:37 +0800 Subject: [PATCH 194/196] Move src/external into thirdparty --- CHANGELOG.md | 1 + ci/jenkins/scripts/coverage.sh | 2 +- core/CMakeLists.txt | 1 + core/build-support/lint_exclusions.txt | 2 +- core/coverage.sh | 2 +- core/src/CMakeLists.txt | 13 +++++++------ core/src/index/knowhere/knowhere/common/Log.h | 2 +- core/src/index/unittest/CMakeLists.txt | 2 +- core/src/main.cpp | 2 +- core/src/utils/Json.h | 2 +- core/src/utils/Log.h | 2 +- core/src/utils/LogUtil.h | 2 +- .../easyloggingpp/easylogging++.cc | 0 .../easyloggingpp/easylogging++.h | 0 .../external => thirdparty}/nlohmann/json.hpp | 0 core/unittest/CMakeLists.txt | 15 ++++++++------- core/unittest/main.cpp | 2 +- core/unittest/wrapper/CMakeLists.txt | 2 +- core/unittest/wrapper/test_wrapper.cpp | 2 +- 19 files changed, 29 insertions(+), 25 deletions(-) rename core/{src/external => thirdparty}/easyloggingpp/easylogging++.cc (100%) rename core/{src/external => thirdparty}/easyloggingpp/easylogging++.h (100%) rename core/{src/external => thirdparty}/nlohmann/json.hpp (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02fb05d9cc..6ee8bfef0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Please mark all change in change log and use the ticket from JIRA. ## Bug - \#246 - Exclude src/external folder from code coverage for jenkin ci +- \#248 - Move src/external into thirdparty ## Feature - \#12 - Pure CPU version for Milvus diff --git a/ci/jenkins/scripts/coverage.sh b/ci/jenkins/scripts/coverage.sh index 5c9d010d46..fa4fd76490 100755 --- a/ci/jenkins/scripts/coverage.sh +++ b/ci/jenkins/scripts/coverage.sh @@ -132,7 +132,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \ "*/src/server/Server.cpp" \ "*/src/server/DBWrapper.cpp" \ "*/src/server/grpc_impl/GrpcServer.cpp" \ - "*/src/external/*" + "*/thirdparty/*" if [ $? -ne 0 ]; then echo "gen ${FILE_INFO_OUTPUT_NEW} failed" diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 65094b19a3..11e417397b 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -112,6 +112,7 @@ endif () set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR}) set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR}) set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src) +set(MILVUS_THIRDPARTY_SRC ${PROJECT_SOURCE_DIR}/thirdparty) include(ExternalProject) include(DefineOptions) diff --git a/core/build-support/lint_exclusions.txt b/core/build-support/lint_exclusions.txt index 2be060f121..34d469fccb 100644 --- a/core/build-support/lint_exclusions.txt +++ b/core/build-support/lint_exclusions.txt @@ -6,5 +6,5 @@ *easylogging++* *SqliteMetaImpl.cpp *src/grpc* -*src/external* +*thirdparty* *milvus/include* \ No newline at end of file diff --git a/core/coverage.sh b/core/coverage.sh index 9011e290e5..833b1b0050 100755 --- a/core/coverage.sh +++ b/core/coverage.sh @@ -122,7 +122,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \ "*/src/server/Server.cpp" \ "*/src/server/DBWrapper.cpp" \ "*/src/server/grpc_impl/GrpcServer.cpp" \ - "*/src/external/*" + "*/thirdparty/*" if [ $?
-ne 0 ]; then echo "generate ${FILE_INFO_OUTPUT_NEW} failed" diff --git a/core/src/CMakeLists.txt b/core/src/CMakeLists.txt index 79b5e0f1da..e0cc5d0855 100644 --- a/core/src/CMakeLists.txt +++ b/core/src/CMakeLists.txt @@ -19,6 +19,7 @@ include_directories(${MILVUS_SOURCE_DIR}) include_directories(${MILVUS_ENGINE_SRC}) +include_directories(${MILVUS_THIRDPARTY_SRC}) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status) include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus) @@ -65,11 +66,11 @@ set(scheduler_files ${scheduler_task_files} ) -aux_source_directory(${MILVUS_ENGINE_SRC}/external/easyloggingpp external_easyloggingpp_files) -aux_source_directory(${MILVUS_ENGINE_SRC}/external/nlohmann external_nlohmann_files) -set(external_files - ${external_easyloggingpp_files} - ${external_nlohmann_files} +aux_source_directory(${MILVUS_THIRDPARTY_SRC}/easyloggingpp thirdparty_easyloggingpp_files) +aux_source_directory(${MILVUS_THIRDPARTY_SRC}/nlohmann thirdparty_nlohmann_files) +set(thirdparty_files + ${thirdparty_easyloggingpp_files} + ${thirdparty_nlohmann_files} ) aux_source_directory(${MILVUS_ENGINE_SRC}/server server_files) @@ -85,7 +86,7 @@ set(engine_files ${db_insert_files} ${db_meta_files} ${metrics_files} - ${external_files} + ${thirdparty_files} ${utils_files} ${wrapper_files} ) diff --git a/core/src/index/knowhere/knowhere/common/Log.h b/core/src/index/knowhere/knowhere/common/Log.h index 369e7143af..e50a28a15e 100644 --- a/core/src/index/knowhere/knowhere/common/Log.h +++ b/core/src/index/knowhere/knowhere/common/Log.h @@ -17,7 +17,7 @@ #pragma once -#include "external/easyloggingpp/easylogging++.h" +#include "easyloggingpp/easylogging++.h" namespace knowhere { diff --git a/core/src/index/unittest/CMakeLists.txt b/core/src/index/unittest/CMakeLists.txt index 13277aa40e..d27c7e8860 100644 --- a/core/src/index/unittest/CMakeLists.txt +++ b/core/src/index/unittest/CMakeLists.txt @@ -27,7 +27,7 @@ set(basic_libs ) set(util_srcs - ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc + ${MILVUS_THIRDPARTY_SRC}/easyloggingpp/easylogging++.cc ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/FaissIO.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/index/vector_index/helpers/IndexParameter.cpp ${INDEX_SOURCE_DIR}/knowhere/knowhere/adapter/Structure.cpp diff --git a/core/src/main.cpp b/core/src/main.cpp index c5b2d2dffe..401736c34f 100644 --- a/core/src/main.cpp +++ b/core/src/main.cpp @@ -22,7 +22,7 @@ #include #include -#include "external/easyloggingpp/easylogging++.h" +#include "easyloggingpp/easylogging++.h" #include "metrics/Metrics.h" #include "server/Server.h" #include "src/config.h" diff --git a/core/src/utils/Json.h b/core/src/utils/Json.h index e5e74a3b67..235d6ac9c1 100644 --- a/core/src/utils/Json.h +++ b/core/src/utils/Json.h @@ -16,7 +16,7 @@ // under the License. 
#pragma once -#include "external/nlohmann/json.hpp" +#include "nlohmann/json.hpp" namespace milvus { using json = nlohmann::json; diff --git a/core/src/utils/Log.h b/core/src/utils/Log.h index 4aa3146b01..cef72a866d 100644 --- a/core/src/utils/Log.h +++ b/core/src/utils/Log.h @@ -17,7 +17,7 @@ #pragma once -#include "external/easyloggingpp/easylogging++.h" +#include "easyloggingpp/easylogging++.h" namespace milvus { diff --git a/core/src/utils/LogUtil.h b/core/src/utils/LogUtil.h index 7e5afd087a..69607c4631 100644 --- a/core/src/utils/LogUtil.h +++ b/core/src/utils/LogUtil.h @@ -17,7 +17,7 @@ #pragma once -#include "external/easyloggingpp/easylogging++.h" +#include "easyloggingpp/easylogging++.h" #include "utils/Status.h" #include diff --git a/core/src/external/easyloggingpp/easylogging++.cc b/core/thirdparty/easyloggingpp/easylogging++.cc similarity index 100% rename from core/src/external/easyloggingpp/easylogging++.cc rename to core/thirdparty/easyloggingpp/easylogging++.cc diff --git a/core/src/external/easyloggingpp/easylogging++.h b/core/thirdparty/easyloggingpp/easylogging++.h similarity index 100% rename from core/src/external/easyloggingpp/easylogging++.h rename to core/thirdparty/easyloggingpp/easylogging++.h diff --git a/core/src/external/nlohmann/json.hpp b/core/thirdparty/nlohmann/json.hpp similarity index 100% rename from core/src/external/nlohmann/json.hpp rename to core/thirdparty/nlohmann/json.hpp diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt index e485bd729a..10ab362e77 100644 --- a/core/unittest/CMakeLists.txt +++ b/core/unittest/CMakeLists.txt @@ -23,6 +23,7 @@ endforeach () include_directories(${MILVUS_SOURCE_DIR}) include_directories(${MILVUS_ENGINE_SRC}) +include_directories(${MILVUS_THIRDPARTY_SRC}) include_directories(${CMAKE_CURRENT_SOURCE_DIR}) aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files) @@ -57,11 +58,11 @@ set(scheduler_files ${scheduler_optimizer_files} ) -aux_source_directory(${MILVUS_ENGINE_SRC}/external/easyloggingpp external_easyloggingpp_files) -aux_source_directory(${MILVUS_ENGINE_SRC}/external/nlohmann external_nlohmann_files) -set(external_files - ${external_easyloggingpp_files} - ${external_nlohmann_files} +aux_source_directory(${MILVUS_THIRDPARTY_SRC}/easyloggingpp thirdparty_easyloggingpp_files) +aux_source_directory(${MILVUS_THIRDPARTY_SRC}/nlohmann thirdparty_nlohmann_files) +set(thirdparty_files + ${thirdparty_easyloggingpp_files} + ${thirdparty_nlohmann_files} ) aux_source_directory(${MILVUS_ENGINE_SRC}/server server_files) @@ -79,7 +80,7 @@ set(helper_files ${MILVUS_ENGINE_SRC}/utils/Status.cpp ${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp ${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp - ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc + ${MILVUS_THIRDPARTY_SRC}/easyloggingpp/easylogging++.cc ) set(common_files @@ -90,7 +91,7 @@ set(common_files ${db_insert_files} ${db_meta_files} ${metrics_files} - ${external_files} + ${thirdparty_files} ${scheduler_files} ${wrapper_files} ${helper_files} diff --git a/core/unittest/main.cpp b/core/unittest/main.cpp index ff70f3f2cf..7452b95821 100644 --- a/core/unittest/main.cpp +++ b/core/unittest/main.cpp @@ -18,7 +18,7 @@ #include #include -#include "external/easyloggingpp/easylogging++.h" +#include "easyloggingpp/easylogging++.h" INITIALIZE_EASYLOGGINGPP diff --git a/core/unittest/wrapper/CMakeLists.txt b/core/unittest/wrapper/CMakeLists.txt index 232abf6e1a..a320ef723d 100644 --- a/core/unittest/wrapper/CMakeLists.txt +++ 
b/core/unittest/wrapper/CMakeLists.txt @@ -32,7 +32,7 @@ set(wrapper_files set(util_files utils.cpp - ${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc + ${MILVUS_THIRDPARTY_SRC}/easyloggingpp/easylogging++.cc ${MILVUS_ENGINE_SRC}/utils/Status.cpp ) diff --git a/core/unittest/wrapper/test_wrapper.cpp b/core/unittest/wrapper/test_wrapper.cpp index 097cb69075..ddfdfa2fad 100644 --- a/core/unittest/wrapper/test_wrapper.cpp +++ b/core/unittest/wrapper/test_wrapper.cpp @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -#include "external/easyloggingpp/easylogging++.h" +#include "easyloggingpp/easylogging++.h" #include "wrapper/VecIndex.h" #ifdef MILVUS_GPU_VERSION From 5f4866616d0f6d33fa6a9e596a5979a1f1ff4cd7 Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 16:36:40 +0800 Subject: [PATCH 195/196] update build.sh in ci --- ci/scripts/build.sh | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 444537fe6b..27962ccb54 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -15,6 +15,7 @@ CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build" BUILD_TYPE="Debug" BUILD_UNITTEST="OFF" INSTALL_PREFIX="/opt/milvus" +FAISS_ROOT="" BUILD_COVERAGE="OFF" USE_JFROG_CACHE="OFF" RUN_CPPLINT="OFF" @@ -22,7 +23,7 @@ GPU_VERSION="OFF" WITH_MKL="OFF" CUDA_COMPILER=/usr/local/cuda/bin/nvcc -while getopts "o:t:b:gulcjmh" arg +while getopts "o:t:b:f:gulcjmh" arg do case $arg in o) @@ -34,6 +35,9 @@ do b) CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR ;; + f) + FAISS_ROOT=$OPTARG # FAISS ROOT PATH + ;; g) GPU_VERSION="ON"; ;; @@ -60,6 +64,7 @@ parameter: -o: install prefix(default: /opt/milvus) -t: build type(default: Debug) -b: core code build directory +-f: faiss root path -g: gpu version -u: building unit test options(default: OFF) -l: run cpplint, clang-format and clang-tidy(default: OFF) @@ -69,7 +74,7 @@ parameter: -h: help usage: -./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} [-u] [-l] [-c] [-j] [-m] [-h] +./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} -f \${FAISS_ROOT} [-u] [-l] [-c] [-j] [-m] [-h] " exit 0 ;; @@ -94,8 +99,10 @@ CMAKE_CMD="cmake \ -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \ -DBUILD_COVERAGE=${BUILD_COVERAGE} \ -DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \ --DBUILD_FAISS_WITH_MKL=${WITH_MKL} \ +-DFAISS_ROOT=${FAISS_ROOT} \ +-DFAISS_WITH_MKL=${WITH_MKL} \ -DArrow_SOURCE=AUTO \ +-DFAISS_SOURCE=AUTO \ ${MILVUS_CORE_DIR}" echo ${CMAKE_CMD} ${CMAKE_CMD} From ff7452a21fc9f8ef80aa0162c1d9f250d8aec6c4 Mon Sep 17 00:00:00 2001 From: Zhiru Zhu Date: Wed, 13 Nov 2019 16:53:23 +0800 Subject: [PATCH 196/196] fix faiss source url --- core/src/index/cmake/ThirdPartyPackagesCore.cmake | 2 +- core/src/index/thirdparty/versions.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/index/cmake/ThirdPartyPackagesCore.cmake b/core/src/index/cmake/ThirdPartyPackagesCore.cmake index 854b295b1a..c046bc3a56 100644 --- a/core/src/index/cmake/ThirdPartyPackagesCore.cmake +++ b/core/src/index/cmake/ThirdPartyPackagesCore.cmake @@ -228,7 +228,7 @@ endforeach () if (DEFINED ENV{FAISS_SOURCE_URL}) set(FAISS_SOURCE_URL "$ENV{FAISS_SOURCE_URL}") else () - set(FAISS_SOURCE_URL "https://github.com/milvus-io/faiss/archive/${FAISS_VERSION}.tar.gz") + set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/${FAISS_VERSION}.tar.gz") set(FAISS_MD5 "b02c1a53234f5acc9bea1b0c55524f50") endif () diff --git 
a/core/src/index/thirdparty/versions.txt b/core/src/index/thirdparty/versions.txt index 380c9dedad..efcef26fa9 100644 --- a/core/src/index/thirdparty/versions.txt +++ b/core/src/index/thirdparty/versions.txt @@ -3,5 +3,5 @@ BOOST_VERSION=1.70.0 GTEST_VERSION=1.8.1 LAPACK_VERSION=v3.8.0 OPENBLAS_VERSION=v0.3.6 -FAISS_VERSION=branch-0.3.0 +FAISS_VERSION=1.6.0 MKL_VERSION=2019.5.281
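Taken together, patches 190-196 replace the old `CUSTOMIZATION`/`FAISS_URL` remote-cache check with two explicit overrides: a `FAISS_SOURCE_URL` environment variable read by `ThirdPartyPackagesCore.cmake`, and a `-f` option that sets `FAISS_ROOT` and flips `FAISS_SOURCE` from `BUNDLED` to `AUTO` so CMake can look for a local FAISS first. A minimal usage sketch follows; the mirror URL and `/opt/faiss` path are illustrative assumptions, not part of the patches:

```shell
# Option 1: keep the bundled FAISS build but fetch the tarball from a mirror.
# ThirdPartyPackagesCore.cmake reads FAISS_SOURCE_URL from the environment.
$ export FAISS_SOURCE_URL=https://example.com/mirror/faiss-1.6.0.tar.gz
$ ./build.sh -t Release

# Option 2: reuse a FAISS installation already on disk; -f sets FAISS_ROOT
# and switches FAISS_SOURCE to AUTO.
$ ./build.sh -t Release -f /opt/faiss
```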